migrate moon code from github to opnfv (Gerrit ref 30/930/1: change 930, patchset 1)
author    WuKong <rebirthmonkey@gmail.com>
Tue, 30 Jun 2015 16:47:29 +0000 (18:47 +0200)
committer WuKong <rebirthmonkey@gmail.com>
Tue, 30 Jun 2015 16:47:29 +0000 (18:47 +0200)
Change-Id: Ice53e368fd1114d56a75271aa9f2e598e3eba604
Signed-off-by: WuKong <rebirthmonkey@gmail.com>
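The ref suffix in the subject, 30/930/1, is the Gerrit change reference for this patchset (change 930, patchset 1). Assuming the project is hosted on the OPNFV Gerrit under the name moon (a hypothetical remote URL, not confirmed by the commit itself), the patchset could be pulled down for local review roughly like so:

    git fetch https://gerrit.opnfv.org/gerrit/moon refs/changes/30/930/1
    git checkout -b review/930-1 FETCH_HEAD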
612 files changed:
.gitignore [new file with mode: 0644]
keystone-moon/CONTRIBUTING.rst [new file with mode: 0644]
keystone-moon/HACKING.rst [new file with mode: 0644]
keystone-moon/LICENSE [new file with mode: 0644]
keystone-moon/MANIFEST.in [new file with mode: 0644]
keystone-moon/README.rst [new file with mode: 0644]
keystone-moon/babel.cfg [new file with mode: 0644]
keystone-moon/bin/keystone-all [new file with mode: 0755]
keystone-moon/bin/keystone-manage [new file with mode: 0755]
keystone-moon/config-generator/keystone.conf [new file with mode: 0644]
keystone-moon/doc/Makefile [new file with mode: 0644]
keystone-moon/doc/README.rst [new file with mode: 0644]
keystone-moon/doc/ext/__init__.py [new file with mode: 0644]
keystone-moon/doc/ext/apidoc.py [new file with mode: 0644]
keystone-moon/doc/keystone_compat_flows.sdx [new file with mode: 0644]
keystone-moon/doc/source/apache-httpd.rst [new file with mode: 0644]
keystone-moon/doc/source/api_curl_examples.rst [new file with mode: 0644]
keystone-moon/doc/source/architecture.rst [new file with mode: 0644]
keystone-moon/doc/source/cli_examples.rst [new file with mode: 0644]
keystone-moon/doc/source/community.rst [new file with mode: 0644]
keystone-moon/doc/source/conf.py [new file with mode: 0644]
keystone-moon/doc/source/configuration.rst [new file with mode: 0644]
keystone-moon/doc/source/configure_federation.rst [new file with mode: 0644]
keystone-moon/doc/source/configuringservices.rst [new file with mode: 0644]
keystone-moon/doc/source/developing.rst [new file with mode: 0644]
keystone-moon/doc/source/event_notifications.rst [new file with mode: 0644]
keystone-moon/doc/source/extension_development.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/endpoint_filter.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/endpoint_policy.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/federation.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/moon.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/moon_api.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/oauth1.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/openidc.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/revoke.rst [new file with mode: 0644]
keystone-moon/doc/source/extensions/shibboleth.rst [new file with mode: 0644]
keystone-moon/doc/source/external-auth.rst [new file with mode: 0644]
keystone-moon/doc/source/http-api.rst [new file with mode: 0644]
keystone-moon/doc/source/index.rst [new file with mode: 0644]
keystone-moon/doc/source/installing.rst [new file with mode: 0644]
keystone-moon/doc/source/key_terms.rst [new file with mode: 0644]
keystone-moon/doc/source/man/keystone-all.rst [new file with mode: 0644]
keystone-moon/doc/source/man/keystone-manage.rst [new file with mode: 0644]
keystone-moon/doc/source/middlewarearchitecture.rst [new file with mode: 0644]
keystone-moon/doc/source/setup.rst [new file with mode: 0644]
keystone-moon/etc/default_catalog.templates [new file with mode: 0644]
keystone-moon/etc/keystone-paste.ini [new file with mode: 0644]
keystone-moon/etc/keystone.conf.sample [new file with mode: 0644]
keystone-moon/etc/logging.conf.sample [new file with mode: 0644]
keystone-moon/etc/policy.json [new file with mode: 0644]
keystone-moon/etc/policy.v3cloudsample.json [new file with mode: 0644]
keystone-moon/etc/sso_callback_template.html [new file with mode: 0644]
keystone-moon/examples/moon/__init__.py [new file with mode: 0644]
keystone-moon/examples/moon/policies/mls_conf/authz/assignment.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/mls_conf/authz/metadata.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/mls_conf/authz/metarule.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/mls_conf/authz/rules.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/mls_conf/authz/scope.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_admin/assignment.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_admin/metadata.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_admin/metarule.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_admin/perimeter.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_admin/rules.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_admin/scope.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_authz/assignment.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_authz/metadata.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_authz/metarule.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_authz/perimeter.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_authz/rules.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_mls_authz/scope.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_admin/assignment.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_admin/metadata.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_admin/metarule.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_admin/perimeter.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_admin/rules.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_admin/scope.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_authz/assignment.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_authz/metadata.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_authz/metarule.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_authz/perimeter.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_authz/rules.json [new file with mode: 0644]
keystone-moon/examples/moon/policies/policy_rbac_authz/scope.json [new file with mode: 0644]
keystone-moon/examples/moon/super_extension/policy/assignment.json [new file with mode: 0644]
keystone-moon/examples/moon/super_extension/policy/configuration.json [new file with mode: 0644]
keystone-moon/examples/moon/super_extension/policy/metadata.json [new file with mode: 0644]
keystone-moon/examples/moon/super_extension/policy/perimeter.json [new file with mode: 0644]
keystone-moon/examples/pki/certs/cacert.pem [new file with mode: 0644]
keystone-moon/examples/pki/certs/middleware.pem [new file with mode: 0644]
keystone-moon/examples/pki/certs/signing_cert.pem [new file with mode: 0644]
keystone-moon/examples/pki/certs/ssl_cert.pem [new file with mode: 0644]
keystone-moon/examples/pki/cms/auth_token_revoked.json [new file with mode: 0644]
keystone-moon/examples/pki/cms/auth_token_revoked.pem [new file with mode: 0644]
keystone-moon/examples/pki/cms/auth_token_scoped.json [new file with mode: 0644]
keystone-moon/examples/pki/cms/auth_token_scoped.pem [new file with mode: 0644]
keystone-moon/examples/pki/cms/auth_token_unscoped.json [new file with mode: 0644]
keystone-moon/examples/pki/cms/auth_token_unscoped.pem [new file with mode: 0644]
keystone-moon/examples/pki/cms/revocation_list.json [new file with mode: 0644]
keystone-moon/examples/pki/cms/revocation_list.pem [new file with mode: 0644]
keystone-moon/examples/pki/gen_pki.sh [new file with mode: 0755]
keystone-moon/examples/pki/private/cakey.pem [new file with mode: 0644]
keystone-moon/examples/pki/private/signing_key.pem [new file with mode: 0644]
keystone-moon/examples/pki/private/ssl_key.pem [new file with mode: 0644]
keystone-moon/httpd/README [new file with mode: 0644]
keystone-moon/httpd/keystone.py [new file with mode: 0644]
keystone-moon/httpd/wsgi-keystone.conf [new file with mode: 0644]
keystone-moon/keystone/__init__.py [new file with mode: 0644]
keystone-moon/keystone/assignment/__init__.py [new file with mode: 0644]
keystone-moon/keystone/assignment/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/assignment/backends/ldap.py [new file with mode: 0644]
keystone-moon/keystone/assignment/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/assignment/controllers.py [new file with mode: 0644]
keystone-moon/keystone/assignment/core.py [new file with mode: 0644]
keystone-moon/keystone/assignment/role_backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/assignment/role_backends/ldap.py [new file with mode: 0644]
keystone-moon/keystone/assignment/role_backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/assignment/routers.py [new file with mode: 0644]
keystone-moon/keystone/assignment/schema.py [new file with mode: 0644]
keystone-moon/keystone/auth/__init__.py [new file with mode: 0644]
keystone-moon/keystone/auth/controllers.py [new file with mode: 0644]
keystone-moon/keystone/auth/core.py [new file with mode: 0644]
keystone-moon/keystone/auth/plugins/__init__.py [new file with mode: 0644]
keystone-moon/keystone/auth/plugins/core.py [new file with mode: 0644]
keystone-moon/keystone/auth/plugins/external.py [new file with mode: 0644]
keystone-moon/keystone/auth/plugins/mapped.py [new file with mode: 0644]
keystone-moon/keystone/auth/plugins/oauth1.py [new file with mode: 0644]
keystone-moon/keystone/auth/plugins/password.py [new file with mode: 0644]
keystone-moon/keystone/auth/plugins/saml2.py [new file with mode: 0644]
keystone-moon/keystone/auth/plugins/token.py [new file with mode: 0644]
keystone-moon/keystone/auth/routers.py [new file with mode: 0644]
keystone-moon/keystone/backends.py [new file with mode: 0644]
keystone-moon/keystone/catalog/__init__.py [new file with mode: 0644]
keystone-moon/keystone/catalog/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/catalog/backends/kvs.py [new file with mode: 0644]
keystone-moon/keystone/catalog/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/catalog/backends/templated.py [new file with mode: 0644]
keystone-moon/keystone/catalog/controllers.py [new file with mode: 0644]
keystone-moon/keystone/catalog/core.py [new file with mode: 0644]
keystone-moon/keystone/catalog/routers.py [new file with mode: 0644]
keystone-moon/keystone/catalog/schema.py [new file with mode: 0644]
keystone-moon/keystone/clean.py [new file with mode: 0644]
keystone-moon/keystone/cli.py [new file with mode: 0644]
keystone-moon/keystone/common/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/authorization.py [new file with mode: 0644]
keystone-moon/keystone/common/base64utils.py [new file with mode: 0644]
keystone-moon/keystone/common/cache/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/cache/_memcache_pool.py [new file with mode: 0644]
keystone-moon/keystone/common/cache/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/cache/backends/memcache_pool.py [new file with mode: 0644]
keystone-moon/keystone/common/cache/backends/mongo.py [new file with mode: 0644]
keystone-moon/keystone/common/cache/backends/noop.py [new file with mode: 0644]
keystone-moon/keystone/common/cache/core.py [new file with mode: 0644]
keystone-moon/keystone/common/config.py [new file with mode: 0644]
keystone-moon/keystone/common/controller.py [new file with mode: 0644]
keystone-moon/keystone/common/dependency.py [new file with mode: 0644]
keystone-moon/keystone/common/driver_hints.py [new file with mode: 0644]
keystone-moon/keystone/common/environment/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/environment/eventlet_server.py [new file with mode: 0644]
keystone-moon/keystone/common/extension.py [new file with mode: 0644]
keystone-moon/keystone/common/json_home.py [new file with mode: 0644]
keystone-moon/keystone/common/kvs/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/kvs/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/kvs/backends/inmemdb.py [new file with mode: 0644]
keystone-moon/keystone/common/kvs/backends/memcached.py [new file with mode: 0644]
keystone-moon/keystone/common/kvs/core.py [new file with mode: 0644]
keystone-moon/keystone/common/kvs/legacy.py [new file with mode: 0644]
keystone-moon/keystone/common/ldap/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/ldap/core.py [new file with mode: 0644]
keystone-moon/keystone/common/manager.py [new file with mode: 0644]
keystone-moon/keystone/common/models.py [new file with mode: 0644]
keystone-moon/keystone/common/openssl.py [new file with mode: 0644]
keystone-moon/keystone/common/pemutils.py [new file with mode: 0755]
keystone-moon/keystone/common/router.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/core.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/README [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/manage.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/sql/migration_helpers.py [new file with mode: 0644]
keystone-moon/keystone/common/utils.py [new file with mode: 0644]
keystone-moon/keystone/common/validation/__init__.py [new file with mode: 0644]
keystone-moon/keystone/common/validation/parameter_types.py [new file with mode: 0644]
keystone-moon/keystone/common/validation/validators.py [new file with mode: 0644]
keystone-moon/keystone/common/wsgi.py [new file with mode: 0644]
keystone-moon/keystone/config.py [new file with mode: 0644]
keystone-moon/keystone/contrib/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/admin_crud/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/admin_crud/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/ec2/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/ec2/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/ec2/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/ec2/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/migrate.cfg [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_filter/schema.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/migrate.cfg [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/endpoint_policy/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/example/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/example/configuration.rst [new file with mode: 0644]
keystone-moon/keystone/contrib/example/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/example/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/example/migrate_repo/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/example/migrate_repo/migrate.cfg [new file with mode: 0644]
keystone-moon/keystone/contrib/example/migrate_repo/versions/001_example_table.py [new file with mode: 0644]
keystone-moon/keystone/contrib/example/migrate_repo/versions/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/example/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/idp.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/migrate.cfg [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/migrate_repo/versions/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/schema.py [new file with mode: 0644]
keystone-moon/keystone/contrib/federation/utils.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/backends/flat.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/exception.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/extension.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/migrate_repo/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/migrate_repo/migrate.cfg [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/migrate_repo/versions/001_moon.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/migrate_repo/versions/002_moon.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/migrate_repo/versions/003_moon.py [new file with mode: 0644]
keystone-moon/keystone/contrib/moon/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/migrate_repo/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/migrate_repo/migrate.cfg [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/oauth1/validator.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/backends/kvs.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/migrate_repo/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/migrate_repo/migrate.cfg [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/migrate_repo/versions/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/model.py [new file with mode: 0644]
keystone-moon/keystone/contrib/revoke/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/s3/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/s3/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/simple_cert/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/simple_cert/controllers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/simple_cert/core.py [new file with mode: 0644]
keystone-moon/keystone/contrib/simple_cert/routers.py [new file with mode: 0644]
keystone-moon/keystone/contrib/user_crud/__init__.py [new file with mode: 0644]
keystone-moon/keystone/contrib/user_crud/core.py [new file with mode: 0644]
keystone-moon/keystone/controllers.py [new file with mode: 0644]
keystone-moon/keystone/credential/__init__.py [new file with mode: 0644]
keystone-moon/keystone/credential/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/credential/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/credential/controllers.py [new file with mode: 0644]
keystone-moon/keystone/credential/core.py [new file with mode: 0644]
keystone-moon/keystone/credential/routers.py [new file with mode: 0644]
keystone-moon/keystone/credential/schema.py [new file with mode: 0644]
keystone-moon/keystone/exception.py [new file with mode: 0644]
keystone-moon/keystone/hacking/__init__.py [new file with mode: 0644]
keystone-moon/keystone/hacking/checks.py [new file with mode: 0644]
keystone-moon/keystone/i18n.py [new file with mode: 0644]
keystone-moon/keystone/identity/__init__.py [new file with mode: 0644]
keystone-moon/keystone/identity/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/identity/backends/ldap.py [new file with mode: 0644]
keystone-moon/keystone/identity/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/identity/controllers.py [new file with mode: 0644]
keystone-moon/keystone/identity/core.py [new file with mode: 0644]
keystone-moon/keystone/identity/generator.py [new file with mode: 0644]
keystone-moon/keystone/identity/id_generators/__init__.py [new file with mode: 0644]
keystone-moon/keystone/identity/id_generators/sha256.py [new file with mode: 0644]
keystone-moon/keystone/identity/mapping_backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/identity/mapping_backends/mapping.py [new file with mode: 0644]
keystone-moon/keystone/identity/mapping_backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/identity/routers.py [new file with mode: 0644]
keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-info.po [new file with mode: 0644]
keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-error.po [new file with mode: 0644]
keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone.po [new file with mode: 0644]
keystone-moon/keystone/locale/en_GB/LC_MESSAGES/keystone-log-info.po [new file with mode: 0644]
keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-error.po [new file with mode: 0644]
keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-error.po [new file with mode: 0644]
keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-info.po [new file with mode: 0644]
keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-warning.po [new file with mode: 0644]
keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-error.po [new file with mode: 0644]
keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-info.po [new file with mode: 0644]
keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-error.po [new file with mode: 0644]
keystone-moon/keystone/locale/keystone-log-critical.pot [new file with mode: 0644]
keystone-moon/keystone/locale/keystone-log-error.pot [new file with mode: 0644]
keystone-moon/keystone/locale/keystone-log-info.pot [new file with mode: 0644]
keystone-moon/keystone/locale/keystone-log-warning.pot [new file with mode: 0644]
keystone-moon/keystone/locale/keystone.pot [new file with mode: 0644]
keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-error.po [new file with mode: 0644]
keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po [new file with mode: 0644]
keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/vi_VN/LC_MESSAGES/keystone-log-info.po [new file with mode: 0644]
keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po [new file with mode: 0644]
keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-info.po [new file with mode: 0644]
keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po [new file with mode: 0644]
keystone-moon/keystone/middleware/__init__.py [new file with mode: 0644]
keystone-moon/keystone/middleware/core.py [new file with mode: 0644]
keystone-moon/keystone/middleware/ec2_token.py [new file with mode: 0644]
keystone-moon/keystone/models/__init__.py [new file with mode: 0644]
keystone-moon/keystone/models/token_model.py [new file with mode: 0644]
keystone-moon/keystone/notifications.py [new file with mode: 0644]
keystone-moon/keystone/openstack/__init__.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/README [new file with mode: 0644]
keystone-moon/keystone/openstack/common/__init__.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/_i18n.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/eventlet_backdoor.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/fileutils.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/loopingcall.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/service.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/systemd.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/threadgroup.py [new file with mode: 0644]
keystone-moon/keystone/openstack/common/versionutils.py [new file with mode: 0644]
keystone-moon/keystone/policy/__init__.py [new file with mode: 0644]
keystone-moon/keystone/policy/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/policy/backends/rules.py [new file with mode: 0644]
keystone-moon/keystone/policy/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/policy/controllers.py [new file with mode: 0644]
keystone-moon/keystone/policy/core.py [new file with mode: 0644]
keystone-moon/keystone/policy/routers.py [new file with mode: 0644]
keystone-moon/keystone/policy/schema.py [new file with mode: 0644]
keystone-moon/keystone/resource/__init__.py [new file with mode: 0644]
keystone-moon/keystone/resource/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/resource/backends/ldap.py [new file with mode: 0644]
keystone-moon/keystone/resource/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/resource/config_backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/resource/config_backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/resource/controllers.py [new file with mode: 0644]
keystone-moon/keystone/resource/core.py [new file with mode: 0644]
keystone-moon/keystone/resource/routers.py [new file with mode: 0644]
keystone-moon/keystone/resource/schema.py [new file with mode: 0644]
keystone-moon/keystone/routers.py [new file with mode: 0644]
keystone-moon/keystone/server/__init__.py [new file with mode: 0644]
keystone-moon/keystone/server/common.py [new file with mode: 0644]
keystone-moon/keystone/server/eventlet.py [new file with mode: 0644]
keystone-moon/keystone/server/wsgi.py [new file with mode: 0644]
keystone-moon/keystone/service.py [new file with mode: 0644]
keystone-moon/keystone/tests/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/func/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/func/test_func_api_authz.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/func/test_func_api_intra_extension_admin.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/func/test_func_api_log.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/func/test_func_api_tenant.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/unit/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py [new file with mode: 0644]
keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/core_ldap.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/core_sql.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/domain_config/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/domain_config/core.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/domain_config/test_sql.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/role/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/role/core.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/role/test_ldap.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/backend/role/test_sql.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/catalog/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/catalog/test_core.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_base64utils.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_connection_pool.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_injection.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_json_home.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_ldap.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_notifications.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_pemutils.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_sql_core.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/common/test_utils.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_db2.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_ldap.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_ldap_pool.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_liveldap.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_pool_liveldap.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_postgresql.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_sql.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/backend_tls_liveldap.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/deprecated.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/deprecated_override.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/config_files/test_auth_plugin.conf [new file with mode: 0644]
keystone-moon/keystone/tests/unit/core.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/default_catalog.templates [new file with mode: 0644]
keystone-moon/keystone/tests/unit/default_fixtures.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/fakeldap.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/federation_fixtures.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/filtering.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/identity/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/identity/test_core.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/identity_mapping.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/ksfixtures/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/ksfixtures/appserver.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/ksfixtures/cache.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/ksfixtures/database.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/ksfixtures/hacking.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/ksfixtures/key_repository.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/ksfixtures/temporaryfile.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/mapping_fixtures.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/rest.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/saml2/idp_saml2_metadata.xml [new file with mode: 0644]
keystone-moon/keystone/tests/unit/saml2/signed_saml2_assertion.xml [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_auth.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_auth_plugin.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_endpoint_policy_sql.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_federation_sql.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_id_mapping_sql.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_kvs.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_ldap.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_rules.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_sql.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_backend_templated.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_cache.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_cache_backend_mongo.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_catalog.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_cert_setup.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_cli.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_config.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_contrib_s3_core.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_contrib_simple_cert.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_driver_hints.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_ec2_token_middleware.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_exception.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_hacking_checks.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_ipv6.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_kvs.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_ldap_livetest.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_middleware.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_no_admin_token_auth.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_policy.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_revoke.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_singular_plural.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_sql_livetest.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_sql_upgrade.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_ssl.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_token_bind.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_token_provider.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_url_middleware.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v2.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v2_controller.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v2_keystoneclient.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v2_keystoneclient_sql.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_assignment.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_auth.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_catalog.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_controller.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_credential.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_domain_config.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_federation.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_filters.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_identity.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_oauth1.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_os_revoke.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_policy.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_v3_protection.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_validation.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_versions.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/test_wsgi.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/tests/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/tests/test_core.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/tests/test_utils.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/token/__init__.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/token/test_fernet_provider.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/token/test_provider.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/token/test_token_data_helper.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/token/test_token_model.py [new file with mode: 0644]
keystone-moon/keystone/tests/unit/utils.py [new file with mode: 0644]
keystone-moon/keystone/token/__init__.py [new file with mode: 0644]
keystone-moon/keystone/token/controllers.py [new file with mode: 0644]
keystone-moon/keystone/token/persistence/__init__.py [new file with mode: 0644]
keystone-moon/keystone/token/persistence/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/token/persistence/backends/kvs.py [new file with mode: 0644]
keystone-moon/keystone/token/persistence/backends/memcache.py [new file with mode: 0644]
keystone-moon/keystone/token/persistence/backends/memcache_pool.py [new file with mode: 0644]
keystone-moon/keystone/token/persistence/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/token/persistence/core.py [new file with mode: 0644]
keystone-moon/keystone/token/provider.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/__init__.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/common.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/fernet/__init__.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/fernet/core.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/fernet/token_formatters.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/fernet/utils.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/pki.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/pkiz.py [new file with mode: 0644]
keystone-moon/keystone/token/providers/uuid.py [new file with mode: 0644]
keystone-moon/keystone/token/routers.py [new file with mode: 0644]
keystone-moon/keystone/trust/__init__.py [new file with mode: 0644]
keystone-moon/keystone/trust/backends/__init__.py [new file with mode: 0644]
keystone-moon/keystone/trust/backends/sql.py [new file with mode: 0644]
keystone-moon/keystone/trust/controllers.py [new file with mode: 0644]
keystone-moon/keystone/trust/core.py [new file with mode: 0644]
keystone-moon/keystone/trust/routers.py [new file with mode: 0644]
keystone-moon/keystone/trust/schema.py [new file with mode: 0644]
keystone-moon/openstack-common.conf [new file with mode: 0644]
keystone-moon/rally-scenarios/README.rst [new file with mode: 0644]
keystone-moon/rally-scenarios/keystone.yaml [new file with mode: 0644]
keystone-moon/requirements-py3.txt [new file with mode: 0644]
keystone-moon/requirements.txt [new file with mode: 0644]
keystone-moon/run_tests.sh [new file with mode: 0755]
keystone-moon/setup.cfg [new file with mode: 0644]
keystone-moon/setup.py [new file with mode: 0644]
keystone-moon/test-requirements-py3.txt [new file with mode: 0644]
keystone-moon/test-requirements.txt [new file with mode: 0644]
keystone-moon/tools/colorizer.py [new file with mode: 0755]
keystone-moon/tools/convert_to_sqlite.sh [new file with mode: 0755]
keystone-moon/tools/install_venv.py [new file with mode: 0644]
keystone-moon/tools/install_venv_common.py [new file with mode: 0644]
keystone-moon/tools/pretty_tox.sh [new file with mode: 0644]
keystone-moon/tools/sample_data.sh [new file with mode: 0755]
keystone-moon/tools/with_venv.sh [new file with mode: 0755]
keystone-moon/tox.ini [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..3588568
--- /dev/null
@@ -0,0 +1,36 @@
+*.pyc
+*.sw?
+*.egg/
+vendor
+.ksl-venv
+.venv
+.update-venv/
+.tox
+keystone.egg-info/
+*.log
+.coverage
+coverage.xml
+cover/*
+covhtml
+pep8.txt
+nosetests.xml
+doc/build
+.DS_Store
+doc/source/api
+doc/source/modules.rst
+ChangeLog
+AUTHORS
+build/
+dist/
+etc/keystone.conf
+etc/logging.conf
+etc/keystone/
+keystone/tests/tmp/
+.project
+.pydevproject
+keystone/locale/*/LC_MESSAGES/*.mo
+.testrepository/
+*.db
+.idea/
+keystone/contrib/moon_v2/
+vagrant*/
\ No newline at end of file
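A quick way to verify these ignore rules once the patch is applied is git's check-ignore command, which prints the .gitignore line and pattern that match a given path. The paths below are illustrative; the expected output follows git's source:linenum:pattern pathname format:

    $ git check-ignore -v keystone.egg-info/ .venv
    .gitignore:9:keystone.egg-info/    keystone.egg-info/
    .gitignore:6:.venv                 .venv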
diff --git a/keystone-moon/CONTRIBUTING.rst b/keystone-moon/CONTRIBUTING.rst
new file mode 100644 (file)
index 0000000..fc3d366
--- /dev/null
@@ -0,0 +1,16 @@
+If you would like to contribute to the development of OpenStack,
+you must follow the steps documented at:
+
+   http://wiki.openstack.org/HowToContribute#If_you.27re_a_developer
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at:
+
+   http://wiki.openstack.org/GerritWorkflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on Launchpad, not GitHub:
+
+   https://bugs.launchpad.net/keystone
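In practice, the Gerrit workflow referenced above is driven with the git-review tool. A minimal sketch of the round trip, assuming the remote and commit hook are configured as the wiki page describes:

    pip install git-review
    git review -s     # install the Change-Id commit hook and gerrit remote
    git commit -a     # the hook appends a Change-Id: line, as in this commit
    git review        # push the change to Gerrit for review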
diff --git a/keystone-moon/HACKING.rst b/keystone-moon/HACKING.rst
new file mode 100644 (file)
index 0000000..86bce20
--- /dev/null
@@ -0,0 +1,58 @@
+Keystone Style Commandments
+===========================
+
+- Step 1: Read the OpenStack Style Commandments
+  http://docs.openstack.org/developer/hacking/
+- Step 2: Read on
+
+Keystone Specific Commandments
+------------------------------
+
+- Avoid using "double quotes" where you can reasonably use 'single quotes'
+
+
+TODO vs FIXME
+-------------
+
+- TODO(name): implies that something should be done (cleanup, refactoring,
+  etc.), but the code is expected to be functional.
+- FIXME(name): implies that the method/function/etc. shouldn't be used until
+  the noted issue is resolved and the bug is fixed.
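+
+For example (the annotations below are illustrative, not taken from the
+upstream code)::
+
+    # TODO(name): consolidate the two copies of this parsing logic
+    # FIXME(name): breaks on expired tokens; do not call until fixed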
+
+
+Logging
+-------
+
+Use the common logging module, and obtain a logger with ``getLogger``::
+
+    from oslo_log import log
+
+    LOG = log.getLogger(__name__)
+
+    LOG.debug('Foobar')
+
+
+AssertEqual argument order
+--------------------------
+
+The ``assertEqual`` method's arguments should be in ('expected', 'actual') order.
+
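+For example (``project`` is an illustrative local variable)::
+
+    # GOOD: expected value first, observed value second
+    self.assertEqual('demo', project['name'])
+
+    # BAD: arguments reversed
+    self.assertEqual(project['name'], 'demo')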
+
+Properly Calling Callables
+--------------------------
+
+Methods, functions and classes can specify optional parameters (with default
+values) using Python's keyword arg syntax. When providing a value to such a
+callable we prefer that the call also uses keyword arg syntax. For example::
+
+    def f(required, optional=None):
+        pass
+
+    # GOOD
+    f(0, optional=True)
+
+    # BAD
+    f(0, True)
+
+This gives us the flexibility to re-order arguments and, more importantly,
+to add new required arguments. It's also more explicit and easier to read.
diff --git a/keystone-moon/LICENSE b/keystone-moon/LICENSE
new file mode 100644 (file)
index 0000000..68c771a
--- /dev/null
@@ -0,0 +1,176 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
diff --git a/keystone-moon/MANIFEST.in b/keystone-moon/MANIFEST.in
new file mode 100644 (file)
index 0000000..fa69c1a
--- /dev/null
@@ -0,0 +1,23 @@
+include AUTHORS
+include babel.cfg
+include ChangeLog
+include CONTRIBUTING.txt
+include LICENSE
+include HACKING.rst
+include README.rst
+include openstack-common.conf
+include run_tests.py
+include run_tests.sh
+include setup.cfg
+include setup.py
+include TODO
+include tox.ini
+include etc/*
+include httpd/*
+graft bin
+graft doc
+graft keystone/tests
+graft tools
+graft examples
+recursive-include keystone *.json *.xml *.cfg *.pem README *.po *.pot *.sql
+global-exclude *.pyc *.sdx *.log *.db *.swp keystone/tests/tmp/*
diff --git a/keystone-moon/README.rst b/keystone-moon/README.rst
new file mode 100644 (file)
index 0000000..853873c
--- /dev/null
@@ -0,0 +1,47 @@
+==================
+OpenStack Keystone
+==================
+
+Keystone provides authentication, authorization and service discovery
+mechanisms via HTTP primarily for use by projects in the OpenStack family. It
+is most commonly deployed as an HTTP interface to existing identity systems,
+such as LDAP.
+
+Developer documentation, the source of which is in ``doc/source/``, is
+published at:
+
+    http://keystone.openstack.org/
+
+The API specification and documentation are available at:
+
+    http://specs.openstack.org/openstack/keystone-specs/
+
+The canonical client library is available at:
+
+    https://github.com/openstack/python-keystoneclient
+
+Documentation for cloud administrators is available at:
+
+    http://docs.openstack.org/
+
+The source of documentation for cloud administrators is available at:
+
+    https://github.com/openstack/openstack-manuals
+
+Information about our team meeting is available at:
+
+    https://wiki.openstack.org/wiki/Meetings/KeystoneMeeting
+
+Bugs and feature requests are tracked on Launchpad at:
+
+    https://bugs.launchpad.net/keystone
+
+Future design work is tracked at:
+
+    http://specs.openstack.org/openstack/keystone-specs/#identity-program-specifications
+
+Contributors are encouraged to join IRC (``#openstack-keystone`` on freenode):
+
+    https://wiki.openstack.org/wiki/IRC
+
+For information on contributing to Keystone, see ``CONTRIBUTING.rst``.
diff --git a/keystone-moon/babel.cfg b/keystone-moon/babel.cfg
new file mode 100644 (file)
index 0000000..efceab8
--- /dev/null
@@ -0,0 +1 @@
+[python: **.py]
diff --git a/keystone-moon/bin/keystone-all b/keystone-moon/bin/keystone-all
new file mode 100755 (executable)
index 0000000..3b00382
--- /dev/null
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import sys
+
+
+# If ../keystone/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir,
+                               'keystone',
+                               '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+
+from keystone.server import eventlet as eventlet_server
+
+
+if __name__ == '__main__':
+    eventlet_server.run(possible_topdir)
diff --git a/keystone-moon/bin/keystone-manage b/keystone-moon/bin/keystone-manage
new file mode 100755 (executable)
index 0000000..360c49a
--- /dev/null
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import sys
+
+# If ../keystone/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir,
+                               'keystone',
+                               '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from keystone import cli
+from keystone.common import environment
+
+
+if __name__ == '__main__':
+    environment.use_stdlib()
+
+    dev_conf = os.path.join(possible_topdir,
+                            'etc',
+                            'keystone.conf')
+    config_files = None
+    if os.path.exists(dev_conf):
+        config_files = [dev_conf]
+
+    cli.main(argv=sys.argv, config_files=config_files)
diff --git a/keystone-moon/config-generator/keystone.conf b/keystone-moon/config-generator/keystone.conf
new file mode 100644 (file)
index 0000000..920c650
--- /dev/null
@@ -0,0 +1,14 @@
+[DEFAULT]
+output_file = etc/keystone.conf.sample
+wrap_width = 79
+namespace = keystone
+namespace = keystone.notifications
+namespace = keystone.openstack.common.eventlet_backdoor
+namespace = oslo.log
+namespace = oslo.messaging
+namespace = oslo.policy
+namespace = oslo.db
+namespace = oslo.middleware
+# We don't use oslo.concurrency config options in keystone at the
+# moment; the namespace is kept here, commented out, in case a use
+# slips through unnoticed.
+#namespace = oslo.concurrency
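+#
+# Usage sketch (assuming oslo-config-generator from oslo.config is
+# installed; not part of this file's upstream contents):
+#   oslo-config-generator --config-file config-generator/keystone.conf
+# run from the repository root regenerates etc/keystone.conf.sample
+# from the namespaces listed above.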
diff --git a/keystone-moon/doc/Makefile b/keystone-moon/doc/Makefile
new file mode 100644 (file)
index 0000000..7986170
--- /dev/null
@@ -0,0 +1,159 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+SOURCEDIR     = source
+SPHINXAPIDOC  = sphinx-apidoc
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean autodoc html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man texinfo info changes linkcheck doctest gettext
+
+help:
+       @echo "Please use \`make <target>' where <target> is one of"
+       @echo "  autodoc    generate the autodoc templates"
+       @echo "  html       to make standalone HTML files"
+       @echo "  dirhtml    to make HTML files named index.html in directories"
+       @echo "  singlehtml to make a single large HTML file"
+       @echo "  pickle     to make pickle files"
+       @echo "  json       to make JSON files"
+       @echo "  htmlhelp   to make HTML files and a HTML help project"
+       @echo "  qthelp     to make HTML files and a qthelp project"
+       @echo "  devhelp    to make HTML files and a Devhelp project"
+       @echo "  epub       to make an epub"
+       @echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+       @echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+       @echo "  text       to make text files"
+       @echo "  man        to make manual pages"
+       @echo "  texinfo    to make Texinfo files"
+       @echo "  info       to make Texinfo files and run them through makeinfo"
+       @echo "  gettext    to make PO message catalogs"
+       @echo "  changes    to make an overview of all changed/added/deprecated items"
+       @echo "  linkcheck  to check all external links for integrity"
+       @echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+       -rm -rf $(BUILDDIR)/*
+
+autodoc:
+       $(SPHINXAPIDOC) -f -o $(SOURCEDIR) ../keystone
+
+html: autodoc
+       $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+       @echo
+       @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+       $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+       @echo
+       @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+       $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+       @echo
+       @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+       $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+       @echo
+       @echo "Build finished; now you can process the pickle files."
+
+json:
+       $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+       @echo
+       @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+       $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+       @echo
+       @echo "Build finished; now you can run HTML Help Workshop with the" \
+             ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+       $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+       @echo
+       @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+             ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+       @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/keystone.qhcp"
+       @echo "To view the help file:"
+       @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/keystone.qhc"
+
+devhelp:
+       $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+       @echo
+       @echo "Build finished."
+       @echo "To view the help file:"
+       @echo "# mkdir -p $$HOME/.local/share/devhelp/keystone"
+       @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/keystone"
+       @echo "# devhelp"
+
+epub:
+       $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+       @echo
+       @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+       $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+       @echo
+       @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+       @echo "Run \`make' in that directory to run these through (pdf)latex" \
+             "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+       $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+       @echo "Running LaTeX files through pdflatex..."
+       $(MAKE) -C $(BUILDDIR)/latex all-pdf
+       @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+       $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+       @echo
+       @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+       $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+       @echo
+       @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+       $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+       @echo
+       @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+       @echo "Run \`make' in that directory to run these through makeinfo" \
+             "(use \`make info' here to do that automatically)."
+
+info:
+       $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+       @echo "Running Texinfo files through makeinfo..."
+       $(MAKE) -C $(BUILDDIR)/texinfo info
+       @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+       $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+       @echo
+       @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+       $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+       @echo
+       @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+       $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+       @echo
+       @echo "Link check complete; look for any errors in the above output " \
+             "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+       $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+       @echo "Testing of doctests in the sources finished, look at the " \
+             "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/keystone-moon/doc/README.rst b/keystone-moon/doc/README.rst
new file mode 100644 (file)
index 0000000..a9537b9
--- /dev/null
@@ -0,0 +1,9 @@
+Building Docs
+=============
+
+Developer documentation is generated using Sphinx. To build this documentation,
+run the following from the root of the repository::
+
+  $ tox -e docs
+
+The documentation will be built at ``doc/build/``.
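+
+Alternatively (a sketch assuming Sphinx and keystone's dependencies are
+already installed in your environment), the bundled ``doc/Makefile`` drives
+the same build::
+
+  $ make -C doc html
+
+This regenerates the autodoc stubs and writes the HTML to ``doc/build/html``.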
diff --git a/keystone-moon/doc/ext/__init__.py b/keystone-moon/doc/ext/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/doc/ext/apidoc.py b/keystone-moon/doc/ext/apidoc.py
new file mode 100644 (file)
index 0000000..435d388
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(dstanek): Uncomment the [pbr] section in setup.cfg and remove this
+# Sphinx extension when https://launchpad.net/bugs/1260495 is fixed.
+
+import os.path as path
+
+from sphinx import apidoc
+
+
+# NOTE(dstanek): pbr will run Sphinx multiple times when it generates
+# documentation. Once for each builder. To run this extension we use the
+# 'builder-inited' hook that fires at the beginning of a Sphinx build.
+# We use ``run_already`` to make sure apidocs are only generated once
+# even if Sphinx is run multiple times.
+run_already = False
+
+
+def run_apidoc(app):
+    global run_already
+    if run_already:
+        return
+    run_already = True
+
+    package_dir = path.abspath(path.join(app.srcdir, '..', '..', 'keystone'))
+    source_dir = path.join(app.srcdir, 'api')
+    apidoc.main(['apidoc', package_dir, '-f',
+                 '-H', 'Keystone Modules',
+                 '-o', source_dir])
+
+
+def setup(app):
+    app.connect('builder-inited', run_apidoc)
diff --git a/keystone-moon/doc/keystone_compat_flows.sdx b/keystone-moon/doc/keystone_compat_flows.sdx
new file mode 100644 (file)
index 0000000..f1fcc5f
--- /dev/null
@@ -0,0 +1,99 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<diagram>
+<source><![CDATA[client:client "Client"
+compat:compat "Compat"
+token:token "Token Service"
+identity:identity "Identity Service"
+catalog:catalog "Catalog Service"
+
+[c "Auth, No Tenant"]
+client:{token, user, service_catalog}=compat.POST /v2.0/tokens {'username': user, 'password': password}
+       compat:(user, password, None)=identity.authenticate(user, password, tenant=None)
+       compat:(id, user, password, None)=token.create_token(user, password, tenant=None)
+       compat:{service_catalog (includes all tenants)}=catalog.get_catalog(user, None)
+[/c]
+
+[c "Auth, With Tenant"]
+client:{scoped_token, user, service_catalog}=compat.POST /v2.0/tokens {'username': user, 'password': password, 'tenant': tenant}
+       compat:(user, password, tenant)=identity.authenticate(user, password, tenant)
+       compat:(id, user, password, tenant)=token.create_token(user, password, tenant)
+       compat:{service_catalog (includes all tenants)}=catalog.get_catalog(user, tenant)
+[/c]
+
+[c "Validate Token, Unscoped"]
+client:{token, user, tenant=None}=compat.GET /v2.0/tokens/$token
+compat:{token, user, tenant}=token.get_token($token)
+[/c]
+
+[c "Validate Token, With Tenant"]
+client:{token, user, tenant}=compat.GET /v2.0/tokens/$token?belongs_to=$tenant
+compat:{token, user, tenant}=token.get_token($token)
+[/c]
+
+[c "Tenants for Token"]
+client:{tenants}=compat.(X-Auth-Token: $token) GET /v2.0/tenants
+compat:{token, user, tenant}=token.get_token($token)
+compat:{token, user, tenant}=identity.get_tenants($user)
+[/c]]]></source>
+<configuration>
+<property name="activationBarBorderThickness" value="1"/>
+<property name="actorWidth" value="25"/>
+<property name="arrowColor" value="-14803256"/>
+<property name="arrowSize" value="6"/>
+<property name="arrowThickness" value="1"/>
+<property name="colorizeThreads" value="true"/>
+<property name="destructorWidth" value="30"/>
+<property name="explicitReturns" value="false"/>
+<property family="Dialog" name="font" size="12" style="0"/>
+<property name="fragmentBorderThickness" value="2"/>
+<property name="fragmentEdgeColor" value="-16751616"/>
+<property name="fragmentLabelBgColor" value="-36"/>
+<property name="fragmentMargin" value="8"/>
+<property name="fragmentPadding" value="10"/>
+<property name="fragmentTextPadding" value="3"/>
+<property name="glue" value="10"/>
+<property name="headHeight" value="35"/>
+<property name="headLabelPadding" value="5"/>
+<property name="headWidth" value="100"/>
+<property name="initialSpace" value="10"/>
+<property name="labeledBoxBgColor" value="-76"/>
+<property name="leftMargin" value="5"/>
+<property name="lifelineThickness" value="1"/>
+<property name="lineWrap" value="false"/>
+<property name="lowerMargin" value="5"/>
+<property name="mainLifelineWidth" value="8"/>
+<property name="messageLabelSpace" value="3"/>
+<property name="messagePadding" value="6"/>
+<property name="noteBgColor" value="-76"/>
+<property name="noteBorderThickness" value="1"/>
+<property name="noteMargin" value="6"/>
+<property name="notePadding" value="6"/>
+<property name="opaqueMessageText" value="false"/>
+<property name="returnArrowVisible" value="true"/>
+<property name="rightMargin" value="5"/>
+<property name="selfMessageHorizontalSpace" value="15"/>
+<property name="separatorBottomMargin" value="8"/>
+<property name="separatorTopMargin" value="15"/>
+<property name="shouldShadowParticipants" value="true"/>
+<property name="slackMode" value="false"/>
+<property name="spaceBeforeActivation" value="2"/>
+<property name="spaceBeforeAnswerToSelf" value="10"/>
+<property name="spaceBeforeConstruction" value="6"/>
+<property name="spaceBeforeSelfMessage" value="7"/>
+<property name="subLifelineWidth" value="6"/>
+<property name="tc0" value="-1118482"/>
+<property name="tc1" value="-256"/>
+<property name="tc2" value="-65536"/>
+<property name="tc3" value="-16776961"/>
+<property name="tc4" value="-16711936"/>
+<property name="tc5" value="-4144960"/>
+<property name="tc6" value="-65281"/>
+<property name="tc7" value="-14336"/>
+<property name="tc8" value="-20561"/>
+<property name="tc9" value="-12566464"/>
+<property name="threadNumbersVisible" value="false"/>
+<property name="threaded" value="true"/>
+<property name="upperMargin" value="5"/>
+<property name="verticallySplit" value="true"/>
+</configuration>
+</diagram>
diff --git a/keystone-moon/doc/source/apache-httpd.rst b/keystone-moon/doc/source/apache-httpd.rst
new file mode 100644 (file)
index 0000000..c075512
--- /dev/null
@@ -0,0 +1,93 @@
+
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=========================
+Running Keystone in HTTPD
+=========================
+
+.. WARNING::
+
+    Running Keystone under HTTPD in the recommended (and tested) configuration
+    does not support the use of ``Transfer-Encoding: chunked``. This is due to
+    a limitation with the WSGI spec and the implementation used by
+    ``mod_wsgi``. It is recommended that all clients assume Keystone will not
+    support ``Transfer-Encoding: chunked``.
+
+
+Files
+-----
+
+Copy the file ``httpd/wsgi-keystone.conf`` to the appropriate location for
+your Apache server, most likely::
+
+    /etc/httpd/conf.d/wsgi-keystone.conf
+
+Update this file to match your system configuration (for example, some
+distributions put httpd logs in the ``apache2`` directory and some in the
+``httpd`` directory; also, enable TLS).
+
+Create the directory ``/var/www/cgi-bin/keystone/``. You can either hardlink
+or softlink the files ``main`` and ``admin`` to the file ``keystone.py`` in
+this directory. For a distribution-appropriate location, it should probably
+be copied to::
+
+    /usr/share/openstack/keystone/httpd/keystone.py
+
+Keystone's primary configuration file (``etc/keystone.conf``) and the
+PasteDeploy configuration file (``etc/keystone-paste.ini``) must be readable to
+HTTPD in one of the default locations described in :doc:`configuration`.
+
+SELinux
+-------
+
+If you are running with SELinux enabled (and you should be), make sure that
+the file has the appropriate SELinux context to access the linked file. If
+you have the file in ``/var/www/cgi-bin``, you can do this by running:
+
+.. code-block:: bash
+
+    $ sudo restorecon /var/www/cgi-bin
+
+Putting it somewhere else requires you to set up your SELinux policy
+accordingly.
+
+Keystone Configuration
+----------------------
+
+Make sure that when using a token format that requires persistence, you use a
+token persistence driver that can be shared between processes. The SQL and
+memcached token persistence drivers provided with keystone can be shared
+between processes.
+
+.. WARNING::
+
+    The KVS (``keystone.token.persistence.backends.kvs.Token``) token
+    persistence driver cannot be shared between processes, so it must not
+    be used when running keystone under HTTPD (tokens would not be shared
+    between the server's processes and validation would fail).
+
+For SQL, in ``/etc/keystone/keystone.conf`` set::
+
+    [token]
+    driver = keystone.token.persistence.backends.sql.Token
+
+For memcached, in ``/etc/keystone/keystone.conf`` set::
+
+    [token]
+    driver = keystone.token.persistence.backends.memcache.Token
+
+All servers that are storing tokens need a shared backend. This means that
+either all servers use the same database server or use a common memcached pool.
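+
+As an illustrative sketch (the host names are placeholders, not part of the
+upstream documentation), a common memcached pool means every keystone server
+points at the same set of memcached instances in ``keystone.conf``::
+
+    [memcache]
+    servers = cache1.example.org:11211,cache2.example.org:11211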
diff --git a/keystone-moon/doc/source/api_curl_examples.rst b/keystone-moon/doc/source/api_curl_examples.rst
new file mode 100644 (file)
index 0000000..a4b3155
--- /dev/null
@@ -0,0 +1,1153 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=======================
+API Examples using Curl
+=======================
+
+--------------------------
+v3 API Examples Using Curl
+--------------------------
+
+Tokens
+======
+
+Default scope
+-------------
+
+Get a token with default scope (may be unscoped):
+
+.. code-block:: bash
+
+    curl -i \
+      -H "Content-Type: application/json" \
+      -d '
+    { "auth": {
+        "identity": {
+          "methods": ["password"],
+          "password": {
+            "user": {
+              "name": "admin",
+              "domain": { "id": "default" },
+              "password": "adminpwd"
+            }
+          }
+        }
+      }
+    }' \
+      http://localhost:5000/v3/auth/tokens ; echo
+
+Example response::
+
+  HTTP/1.1 201 Created
+  X-Subject-Token: MIIFvgY...
+  Vary: X-Auth-Token
+  Content-Type: application/json
+  Content-Length: 1025
+  Date: Tue, 10 Jun 2014 20:55:16 GMT
+
+  {"token": {"methods": ["password"], "roles": [{"id":
+  "9fe2ff9ee4384b1894a90878d3e92bab", "name": "_member_"}, {"id":
+  "c703057be878458588961ce9a0ce686b", "name": "admin"}], "expires_at":
+  "2014-06-10T2:55:16.806001Z", "project": {"domain": {"id": "default", "name":
+  "Default"}, "id": "8538a3f13f9541b28c2620eb19065e45", "name": "admin"},
+  "catalog": [{"endpoints": [{"url": "http://localhost:3537/v2.0", "region":
+  "RegionOne", "interface": "admin", "id": "29beb2f1567642eb810b042b6719ea88"},
+  {"url": "http://localhost:5000/v2.0", "region": "RegionOne", "interface":
+  "internal", "id": "8707e3735d4415c97ae231b4841eb1c"}, {"url":
+  "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "public",
+  "id": "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id":
+  "bd73972c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {},
+  "user": {"domain": {"id": "default", "name": "Default"}, "id":
+  "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "issued_at":
+  "201406-10T20:55:16.806027Z"}}
+
+
+Project-scoped
+--------------
+
+Get a project-scoped token:
+
+.. code-block:: bash
+
+    curl -i \
+      -H "Content-Type: application/json" \
+      -d '
+    { "auth": {
+        "identity": {
+          "methods": ["password"],
+          "password": {
+            "user": {
+              "name": "admin",
+              "domain": { "id": "default" },
+              "password": "adminpwd"
+            }
+          }
+        },
+        "scope": {
+          "project": {
+            "name": "demo",
+            "domain": { "id": "default" }
+          }
+        }
+      }
+    }' \
+      http://localhost:5000/v3/auth/tokens ; echo
+
+Example response::
+
+  HTTP/1.1 201 Created
+  X-Subject-Token: MIIFfQ...
+  Vary: X-Auth-Token
+  Content-Type: application/json
+  Content-Length: 960
+  Date: Tue, 10 Jun 2014 20:40:14 GMT
+
+  {"token": {"methods": ["password"], "roles": [{"id":
+   "c703057be878458588961ce9a0ce686b", "name": "admin"}], "expires_at":
+   "2014-06-10T21:40:14.360795Z", "project": {"domain": {"id": "default",
+   "name": "Default"}, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "name":
+   "demo"}, "catalog": [{"endpoints": [{"url":
+   "http://localhost:35357/v2.0", "region": "RegionOne", "interface": "admin",
+   "id": "29beb2f1567642eb810b042b6719ea88"}, {"url":
+   "http://localhost:5000/v2.0", "region": "RegionOne", "interface":
+   "internal", "id": "87057e3735d4415c97ae231b4841eb1c"}, {"url":
+   "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "public",
+   "id": "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id":
+   "bd7397d2c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {},
+   "user": {"domain": {"id": "default", "name": "Default"}, "id":
+   "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "issued_at":
+   "2014-06-10T20:40:14.360822Z"}}
+
+
+Domain-Scoped
+-------------
+
+Get a domain-scoped token (note that you're going to need a role assignment
+on the domain first!):
+
+.. code-block:: bash
+
+    curl -i \
+      -H "Content-Type: application/json" \
+      -d '
+    { "auth": {
+        "identity": {
+          "methods": ["password"],
+          "password": {
+            "user": {
+              "name": "admin",
+              "domain": { "id": "default" },
+              "password": "adminpwd"
+            }
+          }
+        },
+        "scope": {
+          "domain": {
+            "id": "default"
+          }
+        }
+      }
+    }' \
+      http://localhost:5000/v3/auth/tokens ; echo
+
+Example response::
+
+  HTTP/1.1 201 Created
+  X-Subject-Token: MIIFNg...
+  Vary: X-Auth-Token
+  Content-Type: application/json
+  Content-Length: 889
+  Date: Tue, 10 Jun 2014 20:52:59 GMT
+
+  {"token": {"domain": {"id": "default", "name": "Default"}, "methods":
+  ["password"], "roles": [{"id": "c703057be878458588961ce9a0ce686b", "name":
+  "admin"}], "expires_at": "2014-06-10T21:52:58.852167Z", "catalog":
+  [{"endpoints": [{"url": "http://localhost:35357/v2.0", "region": "RegionOne",
+  "interface": "admin", "id": "29beb2f1567642eb810b042b6719ea88"}, {"url":
+  "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "internal",
+  "id": "87057e3735d4415c97ae231b4841eb1c"}, {"url":
+  "http://localhost:5000/v2.0", "region": "RegionOne", "interface": "public",
+  "id": "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id":
+  "bd7397d2c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {},
+  "user": {"domain": {"id": "default", "name": "Default"}, "id":
+  "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "issued_at":
+  "2014-06-10T20:52:58.852194Z"}}
+
+
+Getting a token from a token
+----------------------------
+
+Get a token from a token:
+
+.. code-block:: bash
+
+    curl -i \
+      -H "Content-Type: application/json" \
+      -d '
+    { "auth": {
+        "identity": {
+          "methods": ["token"],
+          "token": {
+            "id": "'$OS_TOKEN'"
+          }
+        }
+      }
+    }' \
+      http://localhost:5000/v3/auth/tokens ; echo
+
+
+Example response::
+
+  HTTP/1.1 201 Created
+  X-Subject-Token: MIIFxw...
+  Vary: X-Auth-Token
+  Content-Type: application/json
+  Content-Length: 1034
+  Date: Tue, 10 Jun 2014 21:00:05 GMT
+
+  {"token": {"methods": ["token", "password"], "roles": [{"id":
+  "9fe2ff9ee4384b1894a90878d3e92bab", "name": "_member_"}, {"id":
+  "c703057be878458588961ce9a0ce686b", "name": "admin"}], "expires_at":
+  "2014-06-10T21:55:16.806001Z", "project": {"domain": {"id": "default",
+  "name": "Default"}, "id": "8538a3f13f9541b28c2620eb19065e45", "name":
+  "admin"}, "catalog": [{"endpoints": [{"url": "http://localhost:35357/v2.0",
+  "region": "RegionOne", "interface": "admin", "id":
+  "29beb2f1567642eb810b042b6719ea88"}, {"url": "http://localhost:5000/v2.0",
+  "region": "RegionOne", "interface": "internal", "id":
+  "87057e3735d4415c97ae231b4841eb1c"}, {"url": "http://localhost:5000/v2.0",
+  "region": "RegionOne", "interface": "public", "id":
+  "ef303187fc8d41668f25199c298396a5"}], "type": "identity", "id":
+  "bd7397d2c0e14fb69bae8ff76e112a90", "name": "keystone"}], "extras": {},
+  "user": {"domain": {"id": "default", "name": "Default"}, "id":
+  "3ec3164f750146be97f21559ee4d9c51", "name": "admin"}, "issued_at":
+  "2014-06-10T21:00:05.548559Z"}}
+
+
+.. note::
+
+    If a scope was included in the request body then this would get a token
+    with the new scope.
+
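+For example, a request like the following (a sketch reusing the ``demo``
+project and ``default`` domain from the project-scoped example above) would
+exchange the token for one scoped to that project:
+
+.. code-block:: bash
+
+    curl -i \
+      -H "Content-Type: application/json" \
+      -d '
+    { "auth": {
+        "identity": {
+          "methods": ["token"],
+          "token": {
+            "id": "'$OS_TOKEN'"
+          }
+        },
+        "scope": {
+          "project": {
+            "name": "demo",
+            "domain": { "id": "default" }
+          }
+        }
+      }
+    }' \
+      http://localhost:5000/v3/auth/tokens ; echo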
+
+DELETE /v3/auth/tokens
+----------------------
+
+Revoke a token:
+
+.. code-block:: bash
+
+    curl -i -X DELETE \
+      -H "X-Auth-Token: $OS_TOKEN" \
+      -H "X-Subject-Token: $OS_TOKEN" \
+      http://localhost:5000/v3/auth/tokens
+
+If there's no error then the response is empty.
+
+
+Domains
+=======
+
+GET /v3/domains
+---------------
+
+List domains:
+
+.. code-block:: bash
+
+    curl -s \
+      -H "X-Auth-Token: $OS_TOKEN" \
+      http://localhost:5000/v3/domains | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "domains": [
+            {
+                "description": "Owns users and tenants (i.e. projects) available on Identity API v2.",
+                "enabled": true,
+                "id": "default",
+                "links": {
+                    "self": "http://identity-server:5000/v3/domains/default"
+                },
+                "name": "Default"
+            }
+        ],
+        "links": {
+            "next": null,
+            "previous": null,
+            "self": "http://identity-server:5000/v3/domains"
+        }
+    }
+
+
+POST /v3/domains
+----------------
+
+Create a domain:
+
+.. code-block:: bash
+
+    curl -s \
+      -H "X-Auth-Token: $OS_TOKEN" \
+      -H "Content-Type: application/json" \
+      -d '{ "domain": { "name": "newdomain"}}' \
+      http://localhost:5000/v3/domains | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "domain": {
+            "enabled": true,
+            "id": "3a5140aecd974bf08041328b53a62458",
+            "links": {
+                "self": "http://identity-server:5000/v3/domains/3a5140aecd974bf08041328b53a62458"
+            },
+            "name": "newdomain"
+        }
+    }
+
+
+Projects
+========
+
+GET /v3/projects
+----------------
+
+List projects:
+
+.. code-block:: bash
+
+    curl -s \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     http://localhost:5000/v3/projects | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "links": {
+            "next": null,
+            "previous": null,
+            "self": "http://localhost:5000/v3/projects"
+        },
+        "projects": [
+            {
+                "description": null,
+                "domain_id": "default",
+                "enabled": true,
+                "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c",
+                "links": {
+                    "self": "http://localhost:5000/v3/projects/3d4c2c82bd5948f0bcab0cf3a7c9b48c"
+                },
+                "name": "demo"
+            }
+        ]
+    }
+
+
+PATCH /v3/projects/{id}
+-----------------------
+
+Disable a project:
+
+.. code-block:: bash
+
+    curl -s -X PATCH \
+      -H "X-Auth-Token: $OS_TOKEN" \
+      -H "Content-Type: application/json" \
+      -d '
+    {
+      "project": {
+          "enabled": false
+        }
+    }'\
+      http://localhost:5000/v3/projects/$PROJECT_ID  | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "project": {
+            "description": null,
+            "domain_id": "default",
+            "enabled": false,
+            "extra": {},
+            "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c",
+            "links": {
+                "self": "http://localhost:5000/v3/projects/3d4c2c82bd5948f0bcab0cf3a7c9b48c"
+            },
+            "name": "demo"
+        }
+    }
+
+
+GET /v3/services
+================
+
+List the services:
+
+.. code-block:: bash
+
+    curl -s \
+      -H "X-Auth-Token: $OS_TOKEN" \
+      http://localhost:5000/v3/services | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "links": {
+            "next": null,
+            "previous": null,
+            "self": "http://localhost:5000/v3/services"
+        },
+        "services": [
+            {
+                "description": "Keystone Identity Service",
+                "enabled": true,
+                "id": "bd7397d2c0e14fb69bae8ff76e112a90",
+                "links": {
+                    "self": "http://localhost:5000/v3/services/bd7397d2c0e14fb69bae8ff76e112a90"
+                },
+                "name": "keystone",
+                "type": "identity"
+            }
+        ]
+    }
+
+
+
+GET /v3/endpoints
+=================
+
+List the endpoints:
+
+.. code-block:: bash
+
+    curl -s \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     http://localhost:5000/v3/endpoints | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "endpoints": [
+            {
+                "enabled": true,
+                "id": "29beb2f1567642eb810b042b6719ea88",
+                "interface": "admin",
+                "links": {
+                    "self": "http://localhost:5000/v3/endpoints/29beb2f1567642eb810b042b6719ea88"
+                },
+                "region": "RegionOne",
+                "service_id": "bd7397d2c0e14fb69bae8ff76e112a90",
+                "url": "http://localhost:35357/v2.0"
+            }
+        ],
+        "links": {
+            "next": null,
+            "previous": null,
+            "self": "http://localhost:5000/v3/endpoints"
+        }
+    }
+
+
+Users
+=====
+
+GET /v3/users
+-------------
+
+List users:
+
+.. code-block:: bash
+
+    curl -s \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     http://localhost:5000/v3/users | python -mjson.tool
+
+POST /v3/users
+--------------
+
+Create a user:
+
+.. code-block:: bash
+
+    curl -s \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     -H "Content-Type: application/json" \
+     -d '{"user": {"name": "newuser", "password": "changeme"}}' \
+     http://localhost:5000/v3/users | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "user": {
+            "domain_id": "default",
+            "enabled": true,
+            "id": "ec8fc20605354edd91873f2d66bf4fc4",
+            "links": {
+                "self": "http://identity-server:5000/v3/users/ec8fc20605354edd91873f2d66bf4fc4"
+            },
+            "name": "newuser"
+        }
+    }
+
+GET /v3/users/{user_id}
+-----------------------
+
+Show details for a user:
+
+.. code-block:: bash
+
+    USER_ID=ec8fc20605354edd91873f2d66bf4fc4
+
+    curl -s \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     http://localhost:5000/v3/users/$USER_ID | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "user": {
+            "domain_id": "default",
+            "enabled": true,
+            "id": "ec8fc20605354edd91873f2d66bf4fc4",
+            "links": {
+                "self": "http://localhost:5000/v3/users/ec8fc20605354edd91873f2d66bf4fc4"
+            },
+            "name": "newuser"
+        }
+    }
+
+POST /v3/users/{user_id}/password
+---------------------------------
+
+Change password (using the default policy, this can be done as the user):
+
+.. code-block:: bash
+
+    USER_ID=b7793000f8d84c79af4e215e9da78654
+    ORIG_PASS=userpwd
+    NEW_PASS=newuserpwd
+
+    curl \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     -H "Content-Type: application/json" \
+     -d '{ "user": {"password": "'$NEW_PASS'", "original_password": "'$ORIG_PASS'"} }' \
+     http://localhost:5000/v3/users/$USER_ID/password
+
+.. note::
+
+    This command doesn't print anything if the request was successful.
+
+PATCH /v3/users/{user_id}
+-------------------------
+
+Reset password (using the default policy, this requires admin):
+
+.. code-block:: bash
+
+    USER_ID=b7793000f8d84c79af4e215e9da78654
+    NEW_PASS=newuserpwd
+
+    curl -s -X PATCH \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     -H "Content-Type: application/json" \
+     -d '{ "user": {"password": "'$NEW_PASS'"} }' \
+     http://localhost:5000/v3/users/$USER_ID | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "user": {
+            "default_project_id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c",
+            "domain_id": "default",
+            "email": "demo@example.com",
+            "enabled": true,
+            "extra": {
+                "email": "demo@example.com"
+            },
+            "id": "269348fdd9374b8885da1418e0730af1",
+            "links": {
+                "self": "http://localhost:5000/v3/users/269348fdd9374b8885da1418e0730af1"
+            },
+            "name": "demo"
+        }
+    }
+
+
+PUT /v3/projects/{project_id}/groups/{group_id}/roles/{role_id}
+===============================================================
+
+Create group role assignment on project:
+
+.. code-block:: bash
+
+    curl -s -X PUT \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     http://localhost:5000/v3/projects/$PROJECT_ID/groups/$GROUP_ID/roles/$ROLE_ID
+
+There's no data in the response if the operation is successful.
+
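+To confirm the assignment took effect (a sketch using the same variables as
+above), the granted roles can be listed back:
+
+.. code-block:: bash
+
+    curl -s \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     http://localhost:5000/v3/projects/$PROJECT_ID/groups/$GROUP_ID/roles |
+       python -mjson.tool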
+
+POST /v3/OS-TRUST/trusts
+========================
+
+Create a trust:
+
+.. code-block:: bash
+
+    curl -s \
+     -H "X-Auth-Token: $OS_TOKEN" \
+     -H "Content-Type: application/json" \
+     -d '
+    { "trust": {
+        "expires_at": "2014-12-30T23:59:59.999999Z",
+        "impersonation": false,
+        "project_id": "'$PROJECT_ID'",
+        "roles": [
+            { "name": "admin" }
+          ],
+        "trustee_user_id": "'$DEMO_USER_ID'",
+        "trustor_user_id": "'$ADMIN_USER_ID'"
+    }}'\
+     http://localhost:5000/v3/OS-TRUST/trusts | python -mjson.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "trust": {
+            "expires_at": "2014-12-30T23:59:59.999999Z",
+            "id": "394998fa61f14736b1f0c1f322882949",
+            "impersonation": false,
+            "links": {
+                "self": "http://localhost:5000/v3/OS-TRUST/trusts/394998fa61f14736b1f0c1f322882949"
+            },
+            "project_id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c",
+            "remaining_uses": null,
+            "roles": [
+                {
+                    "id": "c703057be878458588961ce9a0ce686b",
+                    "links": {
+                        "self": "http://localhost:5000/v3/roles/c703057be878458588961ce9a0ce686b"
+                    },
+                    "name": "admin"
+                }
+            ],
+            "roles_links": {
+                "next": null,
+                "previous": null,
+                "self": "http://localhost:5000/v3/OS-TRUST/trusts/394998fa61f14736b1f0c1f322882949/roles"
+            },
+            "trustee_user_id": "269348fdd9374b8885da1418e0730af1",
+            "trustor_user_id": "3ec3164f750146be97f21559ee4d9c51"
+        }
+    }
+
+
+-------------------------------
+Service API Examples Using Curl
+-------------------------------
+
+The service API is defined to be a subset of the Admin API and, by
+default, runs on port 5000.
+
+GET /
+=====
+
+This call is identical to that documented for the Admin API, except that it
+uses port 5000 instead of port 35357 by default:
+
+.. code-block:: bash
+
+    $ curl http://0.0.0.0:5000
+
+or:
+
+.. code-block:: bash
+
+    $ curl http://0.0.0.0:5000/v2.0/
+
+See the `Admin API Examples Using Curl`_ for more info.
+
+GET /extensions
+===============
+
+This call is identical to that documented for the Admin API.
+
+POST /tokens
+============
+
+This call is identical to that documented for the Admin API.
+
+GET /tenants
+============
+
+List all of the tenants your token can access:
+
+.. code-block:: bash
+
+    $ curl -H "X-Auth-Token:887665443383838" http://localhost:5000/v2.0/tenants
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "tenants_links": [],
+        "tenants": [
+            {
+                "enabled": true,
+                "description": "None",
+                "name": "customer-x",
+                "id": "1"
+            }
+        ]
+    }
+
+-----------------------------
+Admin API Examples Using Curl
+-----------------------------
+
+These examples assume a default port value of 35357, and depend on the
+``sampledata`` bundled with keystone.
+
+GET /
+=====
+
+Discover API version information, links to documentation (PDF, HTML, WADL),
+and supported media types:
+
+.. code-block:: bash
+
+    $ curl http://0.0.0.0:35357
+
+or:
+
+.. code-block:: bash
+
+    $ curl http://0.0.0.0:35357/v2.0/
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "version":{
+            "id":"v2.0",
+            "status":"beta",
+            "updated":"2011-11-19T00:00:00Z",
+            "links":[
+                {
+                    "rel":"self",
+                    "href":"http://127.0.0.1:35357/v2.0/"
+                },
+                {
+                    "rel":"describedby",
+                    "type":"text/html",
+                    "href":"http://docs.openstack.org/"
+                }
+            ],
+            "media-types":[
+                {
+                    "base":"application/json",
+                    "type":"application/vnd.openstack.identity-v2.0+json"
+                }
+            ]
+        }
+    }
+
+GET /extensions
+===============
+
+Discover the API extensions enabled at the endpoint:
+
+.. code-block:: bash
+
+    $ curl http://localhost:35357/v2.0/extensions/
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "extensions":{
+            "values":[]
+        }
+    }
+
+POST /tokens
+============
+
+Authenticate by exchanging credentials for an access token:
+
+.. code-block:: bash
+
+    $ curl -d '{"auth":{"tenantName": "customer-x", "passwordCredentials": {"username": "joeuser", "password": "secrete"}}}' -H "Content-type: application/json" http://localhost:35357/v2.0/tokens
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "access":{
+            "token":{
+                "expires":"2012-02-05T00:00:00",
+                "id":"887665443383838",
+                "tenant":{
+                    "id":"1",
+                    "name":"customer-x"
+                }
+            },
+            "serviceCatalog":[
+                {
+                    "endpoints":[
+                    {
+                        "adminURL":"http://swift.admin-nets.local:8080/",
+                        "region":"RegionOne",
+                        "internalURL":"http://127.0.0.1:8080/v1/AUTH_1",
+                        "publicURL":"http://swift.publicinternets.com/v1/AUTH_1"
+                    }
+                    ],
+                    "type":"object-store",
+                    "name":"swift"
+                },
+                {
+                    "endpoints":[
+                    {
+                        "adminURL":"http://cdn.admin-nets.local/v1.1/1",
+                        "region":"RegionOne",
+                        "internalURL":"http://127.0.0.1:7777/v1.1/1",
+                        "publicURL":"http://cdn.publicinternets.com/v1.1/1"
+                    }
+                    ],
+                    "type":"object-store",
+                    "name":"cdn"
+                }
+            ],
+            "user":{
+                "id":"1",
+                "roles":[
+                    {
+                    "tenantId":"1",
+                    "id":"3",
+                    "name":"Member"
+                    }
+                ],
+                "name":"joeuser"
+            }
+        }
+    }
+
+.. note::
+
+    Take note of the ``['access']['token']['id']`` value produced here
+    (``887665443383838``, above), as you can use it in the calls below.
+
+GET /tokens/{token_id}
+======================
+
+.. note::
+
+    This call refers to a token known to be valid, ``887665443383838`` in this case.
+
+Validate a token:
+
+.. code-block:: bash
+
+    $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tokens/887665443383838
+
+If the token is valid, returns:
+
+.. code-block:: javascript
+
+    {
+        "access":{
+            "token":{
+                "expires":"2012-02-05T00:00:00",
+                "id":"887665443383838",
+                "tenant":{
+                    "id":"1",
+                    "name":"customer-x"
+                }
+            },
+            "user":{
+                "name":"joeuser",
+                "tenantName":"customer-x",
+                "id":"1",
+                "roles":[
+                    {
+                        "serviceId":"1",
+                        "id":"3",
+                        "name":"Member"
+                    }
+                ],
+                "tenantId":"1"
+            }
+        }
+    }
+
+HEAD /tokens/{token_id}
+=======================
+
+This is a high-performance variant of the GET call documented above which,
+by definition, returns no response body:
+
+.. code-block:: bash
+
+    $ curl -I -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tokens/887665443383838
+
+... which returns ``200``, indicating the token is valid::
+
+    HTTP/1.1 200 OK
+    Content-Length: 0
+    Content-Type: None
+    Date: Tue, 08 Nov 2011 23:07:44 GMT
+
+GET /tokens/{token_id}/endpoints
+================================
+
+List all endpoints for a token:
+
+.. code-block:: bash
+
+    $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tokens/887665443383838/endpoints
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "endpoints_links": [
+            {
+                "href": "http://127.0.0.1:35357/tokens/887665443383838/endpoints?'marker=5&limit=10'",
+                "rel": "next"
+            }
+        ],
+        "endpoints": [
+            {
+                "internalURL": "http://127.0.0.1:8080/v1/AUTH_1",
+                "name": "swift",
+                "adminURL": "http://swift.admin-nets.local:8080/",
+                "region": "RegionOne",
+                "tenantId": 1,
+                "type": "object-store",
+                "id": 1,
+                "publicURL": "http://swift.publicinternets.com/v1/AUTH_1"
+            },
+            {
+                "internalURL": "http://localhost:8774/v1.0",
+                "name": "nova_compat",
+                "adminURL": "http://127.0.0.1:8774/v1.0",
+                "region": "RegionOne",
+                "tenantId": 1,
+                "type": "compute",
+                "id": 2,
+                "publicURL": "http://nova.publicinternets.com/v1.0/"
+            },
+            {
+                "internalURL": "http://localhost:8774/v1.1",
+                "name": "nova",
+                "adminURL": "http://127.0.0.1:8774/v1.1",
+                "region": "RegionOne",
+                "tenantId": 1,
+                "type": "compute",
+                "id": 3,
+                "publicURL": "http://nova.publicinternets.com/v1.1/
+            },
+            {
+                "internalURL": "http://127.0.0.1:9292/v1.1/",
+                "name": "glance",
+                "adminURL": "http://nova.admin-nets.local/v1.1/",
+                "region": "RegionOne",
+                "tenantId": 1,
+                "type": "image",
+                "id": 4,
+                "publicURL": "http://glance.publicinternets.com/v1.1/"
+            },
+            {
+                "internalURL": "http://127.0.0.1:7777/v1.1/1",
+                "name": "cdn",
+                "adminURL": "http://cdn.admin-nets.local/v1.1/1",
+                "region": "RegionOne",
+                "tenantId": 1,
+                "type": "object-store",
+                "id": 5,
+                "publicURL": "http://cdn.publicinternets.com/v1.1/1"
+            }
+        ]
+    }
+
+GET /tenants
+============
+
+List all of the tenants in the system (requires an Admin ``X-Auth-Token``):
+
+.. code-block:: bash
+
+    $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tenants
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "tenants_links": [],
+        "tenants": [
+            {
+                "enabled": false,
+                "description": "None",
+                "name": "project-y",
+                "id": "3"
+            },
+            {
+                "enabled": true,
+                "description": "None",
+                "name": "ANOTHER:TENANT",
+                "id": "2"
+            },
+            {
+                "enabled": true,
+                "description": "None",
+                "name": "customer-x",
+                "id": "1"
+            }
+        ]
+    }
+
+GET /tenants/{tenant_id}
+========================
+
+Retrieve information about a tenant, by tenant ID:
+
+.. code-block:: bash
+
+    $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tenants/1
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "tenant":{
+            "enabled":true,
+            "description":"None",
+            "name":"customer-x",
+            "id":"1"
+        }
+    }
+
+GET /tenants/{tenant_id}/users/{user_id}/roles
+==============================================
+
+List the roles a user has been granted on a tenant:
+
+.. code-block:: bash
+
+    $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/tenants/1/users/1/roles
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "roles_links":[],
+        "roles":[
+            {
+                "id":"3",
+                "name":"Member"
+            }
+        ]
+    }
+
+GET /users/{user_id}
+====================
+
+Retrieve information about a user, by user ID:
+
+.. code-block:: bash
+
+    $ curl -H "X-Auth-Token:999888777666" http://localhost:35357/v2.0/users/1
+
+Returns:
+
+.. code-block:: javascript
+
+    {
+        "user":{
+            "tenantId":"1",
+            "enabled":true,
+            "id":"1",
+            "name":"joeuser"
+        }
+    }
+
+GET /tokens/revoked
+===================
+
+Get the revocation list:
+
+.. code-block:: bash
+
+    curl -s -H "X-Auth-Token: $OS_TOKEN" \
+      http://localhost:35357/v2.0/tokens/revoked |
+     jq -r .signed |
+     openssl cms -verify \
+      -certfile /etc/keystone/ssl/certs/signing_cert.pem \
+      -CAfile /etc/keystone/ssl/certs/ca.pem \
+      -inform PEM \
+      -nosmimecap -nodetach -nocerts -noattr 2>/dev/null |
+     python -m json.tool
+
+Example response:
+
+.. code-block:: javascript
+
+    {
+        "revoked": [
+            {
+                "expires": "2014-06-10T21:40:14Z",
+                "id": "e6e2b5c9092751f88d2bcd30b09777a9"
+            },
+            {
+                "expires": "2014-06-10T21:47:29Z",
+                "id": "883ef5d610bd1c68fbaa8ac528aa9f17"
+            },
+            {
+                "expires": "2014-06-10T21:51:52Z",
+                "id": "41775ff4838f8f406b7bad28bea0dde6"
+            }
+        ]
+    }
diff --git a/keystone-moon/doc/source/architecture.rst b/keystone-moon/doc/source/architecture.rst
new file mode 100644 (file)
index 0000000..75b0cea
--- /dev/null
@@ -0,0 +1,307 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+Keystone Architecture
+=====================
+
+Much of the design is precipitated from the expectation that the auth backends
+for most deployments will actually be shims in front of existing user systems.
+
+
+------------
+The Services
+------------
+
+Keystone is organized as a group of internal services exposed on one or many
+endpoints. Many of these services are used in a combined fashion by the
+frontend, for example an authenticate call will validate user/project
+credentials with the Identity service and, upon success, create and return a
+token with the Token service.
+
+
+Identity
+--------
+
+The Identity service provides auth credential validation and data about Users
+and Groups.
+
+In the basic case, all of this data is managed by the service, allowing it to
+handle all the CRUD associated with the data.
+
+In other cases, the data is pulled from an authoritative backend service. An
+example of this would be when backending on LDAP. See `LDAP Backend` below for
+more details.
+
+
+Resource
+--------
+
+The Resource service provides data about Projects and Domains.
+
+Like the Identity service, this data may either be managed directly by the
+service or be pulled from another authoritative backend service, such as LDAP.
+
+
+Assignment
+----------
+
+The Assignment service provides data about Roles and Role assignments to the
+entities managed by the Identity and Resource services.  Again, like these two
+services, this data may either be managed directly by the Assignment service
+or be pulled from another authoritative backend service, such as LDAP.
+
+
+Token
+-----
+
+The Token service validates and manages Tokens used for authenticating requests
+once a user's credentials have already been verified.
+
+
+Catalog
+-------
+
+The Catalog service provides an endpoint registry used for endpoint discovery.
+
+
+Policy
+------
+
+The Policy service provides a rule-based authorization engine and the
+associated rule management interface.
+
+
+------------------------
+Application Construction
+------------------------
+
+Keystone is an HTTP front-end to several services. Like other OpenStack
+applications, this is done using python WSGI interfaces and applications are
+configured together using Paste_. The application's HTTP endpoints are made up
+of pipelines of WSGI middleware, such as:
+
+.. code-block:: ini
+
+    [pipeline:api_v3]
+    pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension service_v3
+
+These in turn use a subclass of :mod:`keystone.common.wsgi.ComposingRouter` to
+link URLs to Controllers (a subclass of
+:mod:`keystone.common.wsgi.Application`). Within each Controller, one or more
+Managers are loaded (for example, see :mod:`keystone.catalog.core.Manager`),
+which are thin wrapper classes that load the appropriate service driver based
+on the Keystone configuration. A schematic sketch of this wiring follows the
+list below.
+
+* Assignment
+
+ * :mod:`keystone.assignment.controllers.GrantAssignmentV3`
+ * :mod:`keystone.assignment.controllers.ProjectAssignmentV3`
+ * :mod:`keystone.assignment.controllers.TenantAssignment`
+ * :mod:`keystone.assignment.controllers.Role`
+ * :mod:`keystone.assignment.controllers.RoleAssignmentV2`
+ * :mod:`keystone.assignment.controllers.RoleAssignmentV3`
+ * :mod:`keystone.assignment.controllers.RoleV3`
+
+* Authentication
+
+ * :mod:`keystone.auth.controllers.Auth`
+
+* Catalog
+
+ * :mod:`keystone.catalog.controllers.EndpointV3`
+ * :mod:`keystone.catalog.controllers.RegionV3`
+ * :mod:`keystone.catalog.controllers.ServiceV3`
+
+* Identity
+
+ * :mod:`keystone.identity.controllers.GroupV3`
+ * :mod:`keystone.identity.controllers.UserV3`
+
+* Policy
+
+ * :mod:`keystone.policy.controllers.PolicyV3`
+
+* Resource
+
+ * :mod:`keystone.resource.controllers.DomainV3`
+ * :mod:`keystone.resource.controllers.ProjectV3`
+
+* Token
+
+ * :mod:`keystone.token.controllers.Auth`
+
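+To make this wiring concrete, here is a deliberately simplified, hypothetical
+sketch of the router/controller/manager pattern described above. The class and
+method names are illustrative stand-ins, not Keystone's actual code; the real
+classes live in :mod:`keystone.common.wsgi` and the per-service modules.
+
+.. code-block:: python
+
+  class Application(object):
+      """Stand-in for keystone.common.wsgi.Application."""
+
+
+  class CatalogController(Application):
+      def __init__(self, manager):
+          # the Manager is a thin wrapper around the configured driver
+          self.manager = manager
+
+      def get_endpoints(self):
+          return self.manager.list_endpoints()
+
+
+  class Router(object):
+      """Maps URL paths to controller methods, as a ComposingRouter does."""
+
+      def __init__(self):
+          self._routes = {}
+
+      def add_route(self, path, handler):
+          self._routes[path] = handler
+
+      def dispatch(self, path):
+          return self._routes[path]()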
+
+.. _Paste: http://pythonpaste.org/
+
+
+----------------
+Service Backends
+----------------
+
+Each of the services can be configured to use a backend to allow Keystone to fit a
+variety of environments and needs. The backend for each service is defined in
+the keystone.conf file with the key ``driver`` under a group associated with
+each service.
+
+A general class named ``Driver`` exists under each backend to provide an
+abstract base class for implementations, defining the interface each service
+expects. The drivers for the services are:
+
+* :mod:`keystone.assignment.core.Driver`
+* :mod:`keystone.assignment.core.RoleDriver`
+* :mod:`keystone.catalog.core.Driver`
+* :mod:`keystone.identity.core.Driver`
+* :mod:`keystone.policy.core.Driver`
+* :mod:`keystone.resource.core.Driver`
+* :mod:`keystone.token.core.Driver`
+
+If you implement a backend driver for one of the Keystone services, you're
+expected to subclass from these classes.
+
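+As an illustration, below is a minimal sketch of such a subclass. The
+``InMemoryPolicyDriver`` class and its methods are hypothetical; the real
+abstract classes define the full set of methods each service expects, and
+unimplemented methods are expected to raise rather than silently pass.
+
+.. code-block:: python
+
+  class Driver(object):
+      """Stand-in for an abstract base such as keystone.policy.core.Driver."""
+
+      def create_policy(self, policy_id, policy):
+          raise NotImplementedError()
+
+      def get_policy(self, policy_id):
+          raise NotImplementedError()
+
+
+  class InMemoryPolicyDriver(Driver):
+      """Hypothetical backend that stores policies in a dict."""
+
+      def __init__(self):
+          self._policies = {}
+
+      def create_policy(self, policy_id, policy):
+          self._policies[policy_id] = policy
+          return policy
+
+      def get_policy(self, policy_id):
+          return self._policies[policy_id]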
+
+SQL Backend
+-----------
+
+A SQL-based backend uses SQLAlchemy to store data persistently. The
+``keystone-manage`` command introspects the backends to identify SQL-based
+backends when running ``db_sync`` to establish or upgrade the schema. If a
+backend driver has a ``db_sync()`` method, it will be invoked to sync and/or
+migrate the schema.
+
+
+Templated Backend
+-----------------
+
+Largely designed for a common use case around service catalogs in the Keystone
+project, this is a Catalog backend that simply expands pre-configured templates
+to provide catalog data.
+
+Example paste.deploy config (uses $ instead of % to avoid ConfigParser's
+interpolation)::
+
+  [DEFAULT]
+  catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
+  catalog.RegionOne.identity.adminURL = http://localhost:$(public_port)s/v2.0
+  catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
+  catalog.RegionOne.identity.name = 'Identity Service'
+
+
+LDAP Backend
+------------
+
+The LDAP backend stores Users and Projects in separate Subtrees. Roles are recorded
+as entries under the Projects.
+
+
+----------
+Data Model
+----------
+
+Keystone was designed from the ground up to be amenable to multiple styles of
+backends. As such, many of the methods and data types will happily accept more
+data than they know what to do with and pass it on to a backend.
+
+There are a few main data types:
+
+ * **User**: has account credentials, is associated with one or more projects or domains
+ * **Group**: a collection of users, is associated with one or more projects or domains
+ * **Project**: unit of ownership in OpenStack, contains one or more users
+ * **Domain**: unit of ownership in OpenStack, contains users, groups and projects
+ * **Role**: a first-class piece of metadata associated with many user-project pairs.
+ * **Token**: identifying credential associated with a user or user and project
+ * **Extras**: bucket of key-value metadata associated with a user-project pair.
+ * **Rule**: describes a set of requirements for performing an action.
+
+While the general data model allows a many-to-many relationship from Users and
+Groups to Projects and Domains, the actual backend implementations take varying
+levels of advantage of that functionality.
+
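+As a toy illustration (not Keystone's actual schema), the relationships above
+can be pictured as follows, with a Role attached to a user-project pair:
+
+.. code-block:: python
+
+  users = {'u1': {'name': 'joeuser'}}
+  groups = {'g1': {'name': 'admins', 'users': ['u1']}}
+  domains = {'d1': {'name': 'Default'}}
+  projects = {'p1': {'name': 'customer-x', 'domain_id': 'd1'}}
+
+  # a Role is a piece of metadata associated with a user-project pair
+  assignments = [
+      {'user_id': 'u1', 'project_id': 'p1', 'role': 'Member'},
+  ]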
+
+----------------
+Approach to CRUD
+----------------
+
+While it is expected that any "real" deployment at a large company will manage
+its users, groups, projects and domains in its existing user systems, a
+variety of CRUD operations are provided for the sake of development and testing.
+
+CRUD is treated as an extension or additional feature to the core feature set
+in that it is not required that a backend support it. It is expected that
+backends for services that don't support the CRUD operations will raise a
+:mod:`keystone.exception.NotImplemented`.
+
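+For example, a driver fronting a read-only user system might support the read
+path while refusing writes. The class and method names below are hypothetical:
+
+.. code-block:: python
+
+  from keystone import exception
+
+
+  class ReadOnlyIdentityDriver(object):
+      """Hypothetical driver backed by an externally managed user system."""
+
+      def get_user(self, user_id):
+          # reads are delegated to the external system (stubbed here)
+          return {'id': user_id, 'name': 'joeuser'}
+
+      def create_user(self, user_id, user):
+          # CRUD support is optional for backends
+          raise exception.NotImplemented()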
+
+----------------------------------
+Approach to Authorization (Policy)
+----------------------------------
+
+Various components in the system require that different actions are allowed
+based on whether the user is authorized to perform that action.
+
+For the purposes of Keystone there are only a couple levels of authorization
+being checked for:
+
+ * Require that the performing user is considered an admin.
+ * Require that the performing user matches the user being referenced.
+
+Other systems wishing to use the policy engine will require additional styles
+of checks and will possibly write completely custom backends. By default,
+Keystone leverages Policy enforcement that is maintained in Oslo-Incubator,
+found in `keystone/openstack/common/policy.py`.
+
+
+Rules
+-----
+
+Given a list of matches to check for, simply verify that the credentials
+contain the matches. For example:
+
+.. code-block:: python
+
+  credentials = {'user_id': 'foo', 'is_admin': 1, 'roles': ['nova:netadmin']}
+
+  # An admin only call:
+  policy_api.enforce(('is_admin:1',), credentials)
+
+  # An admin or owner call:
+  policy_api.enforce(('is_admin:1', 'user_id:foo'), credentials)
+
+  # A netadmin call:
+  policy_api.enforce(('roles:nova:netadmin',), credentials)
+
+Credentials are generally built from the user metadata in the 'extras' part
+of the Identity API. So, adding a 'role' to the user just means adding the role
+to the user metadata.
+
+
+Capability RBAC
+---------------
+
+(Not yet implemented.)
+
+Another approach to authorization can be action-based, with a mapping of roles
+to which capabilities are allowed for that role. For example:
+
+.. code-block:: python
+
+  credentials = {'user_id': 'foo', 'is_admin': 1, 'roles': ['nova:netadmin']}
+
+  # add a policy
+  policy_api.add_policy('action:nova:add_network', ('roles:nova:netadmin',))
+
+  policy_api.enforce(('action:nova:add_network',), credentials)
+
+In the backend this would look up the policy for 'action:nova:add_network' and
+then do what is effectively a 'Simple Match' style match against the credentials.
diff --git a/keystone-moon/doc/source/cli_examples.rst b/keystone-moon/doc/source/cli_examples.rst
new file mode 100644 (file)
index 0000000..57735db
--- /dev/null
@@ -0,0 +1,316 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+===============================
+Command Line Interface Examples
+===============================
+
+The Keystone command line interface packaged in `python-keystoneclient`_ only
+supports the Identity v2.0 API. The OpenStack common command line interface
+packaged in `python-openstackclient`_  supports both v2.0 and v3 APIs.
+
+.. NOTE::
+
+    As of the Juno release, it is recommended to use ``python-openstackclient``,
+    as it supports both v2.0 and v3 APIs. For the purpose of backwards compatibility,
+    the CLI packaged in ``python-keystoneclient`` is not being removed.
+
+.. _`python-openstackclient`: http://docs.openstack.org/developer/python-openstackclient/
+.. _`python-keystoneclient`: http://docs.openstack.org/developer/python-keystoneclient/
+
+Using python-openstackclient (v3 or v2.0)
+=========================================
+
+A complete list of OpenStackClient commands with full examples is located at
+OpenStackClient's `Command List`_ page. Additionally, for details related to
+authentication, refer to OpenStackClient's `Authentication`_ page.
+
+.. _`Command List`: http://docs.openstack.org/developer/python-openstackclient/command-list.html
+.. _`Authentication`: http://docs.openstack.org/developer/python-openstackclient/authentication.html
+
+Using python-keystoneclient (v2.0-only)
+=======================================
+
+-------
+Tenants
+-------
+
+``tenant-create``
+-----------------
+
+keyword arguments
+
+* name
+* description (optional, defaults to None)
+* enabled (optional, defaults to True)
+
+example:
+
+.. code-block:: bash
+
+    $ keystone tenant-create --name=demo
+
+creates a tenant named "demo".
+
+``tenant-delete``
+-----------------
+
+arguments
+
+* tenant_id
+
+example:
+
+.. code-block:: bash
+
+    $ keystone tenant-delete f2b7b39c860840dfa47d9ee4adffa0b3
+
+-----
+Users
+-----
+
+``user-create``
+---------------
+
+keyword arguments
+
+* name
+* pass
+* email
+* tenant_id (optional, defaults to None)
+* enabled (optional, defaults to True)
+
+example:
+
+.. code-block:: bash
+
+    $ keystone user-create \
+    --name=admin \
+    --pass=secrete \
+    --tenant_id=2395953419144b67955ac4bab96b8fd2 \
+    --email=admin@example.com
+
+``user-delete``
+---------------
+
+arguments
+
+* user_id
+
+example:
+
+.. code-block:: bash
+
+    $ keystone user-delete f2b7b39c860840dfa47d9ee4adffa0b3
+
+``user-list``
+-------------
+
+list users in the system, optionally by a specific tenant (identified by tenant_id)
+
+arguments
+
+* tenant_id (optional, defaults to None)
+
+example:
+
+.. code-block:: bash
+
+    $ keystone user-list
+
+``user-update``
+---------------------
+
+arguments
+
+* user_id
+
+keyword arguments
+
+* name     Desired new user name (Optional)
+* email    Desired new email address (Optional)
+* enabled <true|false>   Enable or disable user (Optional)
+
+
+example:
+
+.. code-block:: bash
+
+    $ keystone user-update 03c84b51574841ba9a0d8db7882ac645 --email=newemail@example.com
+
+``user-password-update``
+------------------------
+
+arguments
+
+* user_id
+* password
+
+example:
+
+.. code-block:: bash
+
+    $ keystone user-password-update --pass foo 03c84b51574841ba9a0d8db7882ac645
+
+-----
+Roles
+-----
+
+``role-create``
+---------------
+
+arguments
+
+* name
+
+example:
+
+.. code-block:: bash
+
+    $ keystone role-create --name=demo
+
+``role-delete``
+---------------
+
+arguments
+
+* role_id
+
+example:
+
+.. code-block:: bash
+
+    $ keystone role-delete 19d1d3344873464d819c45f521ff9890
+
+``role-list``
+-------------
+
+example:
+
+.. code-block:: bash
+
+    $ keystone role-list
+
+``role-get``
+------------
+
+arguments
+
+* role_id
+
+example:
+
+.. code-block:: bash
+
+    $ keystone role-get 19d1d3344873464d819c45f521ff9890
+
+
+``user-role-add``
+-----------------
+
+keyword arguments
+
+* user <user-id>
+* role <role-id>
+* tenant_id <tenant-id>
+
+example:
+
+.. code-block:: bash
+
+    $ keystone user-role-add  \
+      --user=96a6ebba0d4c441887aceaeced892585  \
+      --role=f8dd5a2e4dc64a41b96add562d9a764e  \
+      --tenant_id=2395953419144b67955ac4bab96b8fd2
+
+``user-role-remove``
+--------------------
+
+keyword arguments
+
+* user <user-id>
+* role <role-id>
+* tenant_id <tenant-id>
+
+example:
+
+.. code-block:: bash
+
+    $ keystone user-role-remove  \
+      --user=96a6ebba0d4c441887aceaeced892585  \
+      --role=f8dd5a2e4dc64a41b96add562d9a764e  \
+      --tenant_id=2395953419144b67955ac4bab96b8fd2
+
+--------
+Services
+--------
+
+``service-create``
+------------------
+
+keyword arguments
+
+* name
+* type
+* description
+
+example:
+
+.. code-block:: bash
+
+    $ keystone service-create \
+    --name=nova \
+    --type=compute \
+    --description="Nova Compute Service"
+
+``service-list``
+----------------
+
+
+example:
+
+.. code-block:: bash
+
+    $ keystone service-list
+
+``service-get``
+---------------
+
+arguments
+
+* service_id
+
+example:
+
+.. code-block:: bash
+
+    $ keystone service-get 08741d8ed88242ca88d1f61484a0fe3b
+
+``service-delete``
+------------------
+
+arguments
+
+* service_id
+
+example:
+
+.. code-block:: bash
+
+    $ keystone service-delete 08741d8ed88242ca88d1f61484a0fe3b
diff --git a/keystone-moon/doc/source/community.rst b/keystone-moon/doc/source/community.rst
new file mode 100644 (file)
index 0000000..e1df0b8
--- /dev/null
@@ -0,0 +1,101 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+================
+Getting Involved
+================
+
+The OpenStack community is a very friendly group and there are places online to
+join in with the community. Feel free to ask questions. This document points
+you to some of the places where you can communicate with people.
+
+How to Join the Community
+=========================
+
+Our community welcomes all people interested in open source cloud computing,
+and there are no formal membership requirements. The best way to join the
+community is to talk with others online or at a meetup and offer contributions
+through Launchpad_, the wiki_, or blogs. We welcome all types of contributions,
+from blueprint designs to documentation to testing to deployment scripts.
+
+.. _Launchpad: https://launchpad.net/keystone
+.. _wiki: http://wiki.openstack.org/
+
+#openstack-keystone on Freenode IRC Network
+-------------------------------------------
+
+You can find Keystone folks in `<irc://freenode.net/#openstack-keystone>`_.
+This is usually the best place to ask questions and find your way around. IRC
+stands for Internet Relay Chat and it is a way to chat online in real time.
+You can also ask a question and come back to the log files to read the answer
+later. Logs for the #openstack IRC channels are stored at
+`<http://eavesdrop.openstack.org/irclogs/>`_.
+
+For more information regarding OpenStack IRC channels please visit the
+`OpenStack IRC Wiki <https://wiki.openstack.org/wiki/IRC>`_.
+
+OpenStack Wiki
+--------------
+
+The wiki is a living source of knowledge. It is edited by the community, and
+has collections of links and other sources of information. Typically the pages
+are a good place to write drafts for specs or documentation, describe a
+blueprint, or collaborate with others.
+
+`OpenStack Wiki <http://wiki.openstack.org/>`_
+
+* `useful Keystone project links <http://wiki.openstack.org/Keystone>`_
+
+Keystone on Launchpad
+---------------------
+
+Launchpad is the code hosting platform that OpenStack uses to track bugs,
+feature work, and releases. Like other OpenStack projects, the Keystone source
+code is hosted on GitHub.
+
+* `Keystone Project Page on Launchpad <http://launchpad.net/keystone>`_
+* `Keystone Source Repository on GitHub <http://github.com/openstack/keystone>`_
+
+Within Launchpad, we use
+`blueprints <https://blueprints.launchpad.net/keystone>`_ to track feature
+work and `bugs <https://bugs.launchpad.net/keystone>`_ to track defects. If
+you are looking for a place to get started contributing to keystone, please
+look at any bugs for Keystone that are tagged as `low-hanging-fruit
+<https://bugs.launchpad.net/keystone/+bugs?field.tag=low-hanging-fruit>`_.
+
+OpenStack Blog
+--------------
+
+The OpenStack blog includes a weekly newsletter that aggregates OpenStack news
+from around the internet, as well as providing inside information on upcoming
+events and posts from OpenStack contributors.
+
+`OpenStack Blog <http://openstack.org/blog>`_
+
+See also: `Planet OpenStack <http://planet.openstack.org/>`_, an aggregation of
+blogs about OpenStack from around the internet, combined into a web site and
+RSS feed. If you'd like to contribute with your blog posts, there are
+instructions for `adding your blog <http://wiki.openstack.org/AddingYourBlog>`_.
+
+
+Twitter
+-------
+
+Because all the cool kids do it: `@openstack <http://twitter.com/openstack>`_.
+Also follow the `#openstack <http://search.twitter.com/search?q=%23openstack>`_
+tag for relevant tweets.
+
diff --git a/keystone-moon/doc/source/conf.py b/keystone-moon/doc/source/conf.py
new file mode 100644 (file)
index 0000000..fe46f32
--- /dev/null
@@ -0,0 +1,274 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# keystone documentation build configuration file, created by
+# sphinx-quickstart on Mon Jan  9 12:02:59 2012.
+#
+# This file is execfile()d with the current directory set to its
+#  containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))  # NOTE(dstanek): path for our
+                                           # Sphinx extension
+
+# NOTE(dstanek): adds _ to the builtins so keystone modules can be imported
+__builtins__['_'] = str
+
+# -- General configuration ----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.todo',
+              'sphinx.ext.coverage',
+              'sphinx.ext.viewcode',
+              'oslosphinx',
+              # NOTE(dstanek): Uncomment the [pbr] section in setup.cfg and
+              # remove this Sphinx extension when
+              # https://launchpad.net/bugs/1260495 is fixed.
+              'ext.apidoc',
+              ]
+
+todo_include_todos = True
+
+# Add any paths that contain templates here, relative to this directory.
+# if os.getenv('HUDSON_PUBLISH_DOCS'):
+#     templates_path = ['_ga', '_templates']
+# else:
+#     templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'keystone'
+copyright = u'2012, OpenStack, LLC'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['old']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+show_authors = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+modindex_common_prefix = ['keystone.']
+
+# -- Options for man page output --------------------------------------------
+
+# Grouping the document tree for man pages.
+# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
+
+man_pages = [
+    ('man/keystone-manage', 'keystone-manage', u'Keystone Management Utility',
+     [u'OpenStack'], 1),
+    ('man/keystone-all', 'keystone-all', u'Keystone Startup Command',
+     [u'OpenStack'], 1),
+]
+
+
+# -- Options for HTML output --------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+# html_theme_path = ["."]
+# html_theme = '_theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+#html_static_path = ['images']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
+html_last_updated_fmt = os.popen(git_cmd).read()
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'keystonedoc'
+
+
+# -- Options for LaTeX output -------------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples (source
+# start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+    ('index', 'keystone.tex', u'Keystone Documentation',
+     u'OpenStack', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for Texinfo output -----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    ('index', 'keystone', u'Keystone Documentation',
+     u'OpenStack', 'keystone', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/keystone-moon/doc/source/configuration.rst b/keystone-moon/doc/source/configuration.rst
new file mode 100644 (file)
index 0000000..e365f0e
--- /dev/null
@@ -0,0 +1,1734 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+====================
+Configuring Keystone
+====================
+
+.. toctree::
+   :maxdepth: 1
+
+   man/keystone-manage
+   man/keystone-all
+
+Once Keystone is installed, it is configured via a primary configuration file
+(``etc/keystone.conf``), a PasteDeploy configuration file
+(``etc/keystone-paste.ini``), possibly a separate logging configuration file,
+and initializing data into Keystone using the command line client.
+
+By default, Keystone starts a service on `IANA-assigned port 35357
+<http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt>`_.
+This may overlap with your system's ephemeral port range, so another process
+may already be using this port without being explicitly configured to do so. To
+prevent this scenario from occurring, it's recommended that you explicitly
+exclude port 35357 from the available ephemeral port range. On a Linux system,
+this would be accomplished by:
+
+.. code-block:: bash
+
+    $ sysctl -w 'net.ipv4.ip_local_reserved_ports=35357'
+
+To make the above change persistent,
+``net.ipv4.ip_local_reserved_ports = 35357`` should be added to
+``/etc/sysctl.conf`` or to ``/etc/sysctl.d/keystone.conf``.
+
+Starting and Stopping Keystone under Eventlet
+=============================================
+
+.. WARNING::
+
+    Running keystone under eventlet has been deprecated as of the Kilo release.
+    Support for utilizing eventlet will be removed as of the M-release. The
+    recommended deployment is to run keystone in a WSGI server
+    (e.g. ``mod_wsgi`` under ``HTTPD``).
+
+Keystone can be run using either its built-in eventlet server or it can be run
+embedded in a web server. While the eventlet server is convenient and easy to
+use, it's lacking in security features that have been developed into Internet-
+based web servers over the years. As such, running the eventlet server as
+described in this section is not recommended.
+
+Start Keystone services using the command:
+
+.. code-block:: bash
+
+    $ keystone-all
+
+Invoking this command starts up two ``wsgi.Server`` instances, ``admin`` (the
+administration API) and ``main`` (the primary/public API interface). Both
+services are configured to run in a single process.
+
+.. NOTE::
+
+    The separation into ``admin`` and ``main`` interfaces is an historical
+    anomaly. The new V3 API provides the same interface on both the admin and
+    main interfaces (this can be configured in ``keystone-paste.ini``, but the
+    default is to have both the same). The V2.0 API provides a limited public
+    API (getting and validating tokens) on ``main``, and an administrative API
+    (which can include creating users and such) on the ``admin`` interface.
+
+Stop the process using ``Control-C``.
+
+.. NOTE::
+
+    If you have not already configured Keystone, it may not start as expected.
+
+
+Configuration Files
+===================
+
+The Keystone configuration files use an ``ini`` file format based on Paste_, a
+common system used to configure Python WSGI based applications. The PasteDeploy
+configuration entries (WSGI pipeline definitions) can be provided in a separate
+``keystone-paste.ini`` file, while general and driver-specific configuration
+parameters are in the primary configuration file ``keystone.conf``.
+
+.. NOTE::
+
+   Since keystone's PasteDeploy configuration file has been separated
+   from the main keystone configuration file, ``keystone.conf``, all
+   local configuration or driver-specific configuration parameters must
+   go in the main keystone configuration file instead of the PasteDeploy
+   configuration file, i.e. configuration in ``keystone-paste.ini``
+   is not supported.
+
+The primary configuration file is organized into the following sections:
+
+* ``[DEFAULT]`` - General configuration
+* ``[assignment]`` - Assignment system driver configuration
+* ``[auth]`` - Authentication plugin configuration
+* ``[cache]`` - Caching layer configuration
+* ``[catalog]`` - Service catalog driver configuration
+* ``[credential]`` - Credential system driver configuration
+* ``[endpoint_filter]`` - Endpoint filtering extension configuration
+* ``[endpoint_policy]`` - Endpoint policy extension configuration
+* ``[eventlet_server]`` - Eventlet server configuration
+* ``[eventlet_server_ssl]`` - Eventlet server SSL configuration
+* ``[federation]`` - Federation driver configuration
+* ``[identity]`` - Identity system driver configuration
+* ``[identity_mapping]`` - Identity mapping system driver configuration
+* ``[kvs]`` - KVS storage backend configuration
+* ``[ldap]`` - LDAP configuration options
+* ``[memcache]`` - Memcache configuration options
+* ``[oauth1]`` - OAuth 1.0a system driver configuration
+* ``[os_inherit]`` - Inherited role assignment extension
+* ``[paste_deploy]`` - Pointer to the PasteDeploy configuration file
+* ``[policy]`` - Policy system driver configuration for RBAC
+* ``[resource]`` - Resource system driver configuration
+* ``[revoke]`` - Revocation system driver configuration
+* ``[role]`` - Role system driver configuration
+* ``[saml]`` - SAML configuration options
+* ``[signing]`` - Cryptographic signatures for PKI based tokens
+* ``[ssl]`` - SSL certificate generation configuration
+* ``[token]`` - Token driver & token provider configuration
+* ``[trust]`` - Trust extension configuration
+
+The Keystone primary configuration file is expected to be named
+``keystone.conf``. When starting Keystone, you can specify a different
+configuration file to use with ``--config-file``. If you do **not** specify a
+configuration file, Keystone will look in the following directories for a
+configuration file, in order:
+
+* ``~/.keystone/``
+* ``~/``
+* ``/etc/keystone/``
+* ``/etc/``
+
+The PasteDeploy configuration file is specified by the ``config_file``
+parameter in the ``[paste_deploy]`` section of the primary configuration file.
+If the parameter
+is not an absolute path, then Keystone looks for it in the same directories as
+above. If not specified, WSGI pipeline definitions are loaded from the primary
+configuration file.
+
+Domain-specific Drivers
+-----------------------
+
+.. NOTE::
+
+    This functionality is new in Juno.
+
+Keystone supports the option (disabled by default) to specify identity driver
+configurations on a domain by domain basis, allowing, for example, a specific
+domain to have its own LDAP or SQL server. This is configured by specifying the
+following options:
+
+.. code-block:: ini
+
+ [identity]
+ domain_specific_drivers_enabled = True
+ domain_config_dir = /etc/keystone/domains
+
+Setting ``domain_specific_drivers_enabled`` to ``True`` will enable this
+feature, causing Keystone to look in the ``domain_config_dir`` for config files
+of the form::
+
+ keystone.<domain_name>.conf
+
+Options given in the domain specific configuration file will override those in
+the primary configuration file for the specified domain only. Domains without a
+specific configuration file will continue to use the options from the primary
+configuration file.
+
+.. NOTE::
+
+    Note that when this configuration is enabled, the operations of listing
+    all users and listing all groups are not supported; those calls require
+    either a domain filter to be specified or a domain-scoped token to be
+    used.
+
+.. NOTE::
+
+    Keystone does not support moving the contents of a domain (i.e. "its" users
+    and groups) from one backend to another, nor group membership across
+    backend boundaries.
+
+.. NOTE::
+
+    To delete a domain that uses a domain specific backend, it's necessary to
+    first disable it, remove its specific configuration file (i.e. its
+    corresponding keystone.<domain_name>.conf) and then restart the Identity
+    server.
+
+.. NOTE::
+
+    Although Keystone supports multiple LDAP backends via domain specific
+    configuration files, it currently only supports one SQL backend. This could
+    be either the default driver or a single domain-specific backend, perhaps
+    for storing service users in a predominantly LDAP installation.
+
+Due to the need for user and group IDs to be unique across an OpenStack
+installation and for Keystone to be able to deduce which domain and backend to
+use from just a user or group ID, it dynamically builds a persistent identity
+mapping table from a public ID to the actual domain, local ID (within that
+backend) and entity type. The public ID is automatically generated by Keystone
+when it first encounters the entity. If the local ID of the entity is from a
+backend that does not guarantee to generate UUIDs, a hash algorithm will
+generate a public ID for that entity, which is what will be exposed by
+Keystone.
+
+The use of a hash will ensure that if the public ID needs to be regenerated
+then the same public ID will be created. This is useful if you are running
+multiple keystones and want to ensure the same ID would be generated whichever
+server you hit.
+
+While Keystone will dynamically maintain the identity mapping, including
+removing entries when entities are deleted via Keystone, for entities in
+backends that are managed outside of Keystone (e.g. a read-only LDAP),
+Keystone will not know when entities have been deleted and hence will continue
+to carry stale identity mappings in its table. While such stale entries are
+benign, Keystone provides the ability for operators to purge them from the
+mapping table using the ``keystone-manage`` command, for example:
+
+.. code-block:: bash
+
+    $ keystone-manage mapping_purge --domain-name DOMAINA --local-id abc@de.com
+
+A typical usage would be for an operator to obtain a list of those entries in
+an external backend that had been deleted out-of-band to Keystone, and then
+call keystone-manage to purge those entries by specifying the domain and
+local-id. The type of the entity (i.e. user or group) may also be specified if
+this is needed to uniquely identify the mapping.
+
+Since public IDs can be regenerated **with the correct generator
+implementation**, if the details of those entries that have been deleted are
+not available, then it is safe to simply bulk purge identity mappings
+periodically, for example:
+
+.. code-block:: bash
+
+    $ keystone-manage mapping_purge --domain-name DOMAINA
+
+will purge all the mappings for DOMAINA. The entire mapping table can be purged
+with the following command:
+
+.. code-block:: bash
+
+    $ keystone-manage mapping_purge --all
+
+Public ID Generators
+--------------------
+
+Keystone supports a customizable public ID generator, specified in the
+``[identity_mapping]`` section of the configuration file. Keystone provides a
+sha256 generator as default, which produces regeneratable public IDs. The
+generator algorithm for public IDs is a balance between key size (i.e. the
+length of the public ID), the probability of collision and, in some
+circumstances, the security of the public ID. The maximum length of public ID
+supported by Keystone is 64 characters, and the default generator (sha256) uses
+this full capability. Since the public ID is what is exposed externally by
+Keystone and potentially stored in external systems, some installations may
+wish to make use of other generator algorithms that have a different trade-off
+of attributes. A different generator can be installed by configuring the
+following property:
+
+* ``generator`` - identity mapping generator. Defaults to
+  ``keystone.identity.generators.sha256.Generator``
+
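+To see why sha256-based public IDs are regeneratable, consider the following
+sketch. The exact attributes and separator the default generator hashes are an
+assumption here; the point is that the same inputs always yield the same
+64-character public ID:
+
+.. code-block:: python
+
+    import hashlib
+
+    def generate_public_id(domain_id, entity_type, local_id):
+        # hash the identifying attributes into a stable 64-char hex string
+        material = '%s:%s:%s' % (domain_id, entity_type, local_id)
+        return hashlib.sha256(material.encode('utf-8')).hexdigest()
+
+    # the same entity always maps back to the same public ID
+    assert (generate_public_id('domaina', 'user', 'abc@de.com') ==
+            generate_public_id('domaina', 'user', 'abc@de.com'))
+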
+.. WARNING::
+
+    Changing the generator may cause all existing public IDs to become
+    invalid, so typically the generator selection should be considered
+    immutable for a given installation.
+
+Authentication Plugins
+----------------------
+
+.. NOTE::
+
+    This feature is only supported by Keystone for the Identity API v3 clients.
+
+Keystone supports authentication plugins, which are specified in the
+``[auth]`` section of the configuration file. However, an authentication plugin
+may also have its own section in the configuration file. It is up to the plugin
+to register its own configuration options.
+
+* ``methods`` - comma-delimited list of authentication plugin names
+* ``<plugin name>`` - specify the class which handles the authentication method,
+  in the same manner as one would specify a backend driver.
+
+Keystone provides three authentication methods by default. ``password`` handles
+password authentication and ``token`` handles token authentication.
+``external`` is used in conjunction with authentication performed by a
+container web server that sets the ``REMOTE_USER`` environment variable. For
+more details, refer to :doc:`External Authentication <external-auth>`.
+
+How to Implement an Authentication Plugin
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+All authentication plugins must extend the
+``keystone.auth.core.AuthMethodHandler`` class and implement the
+``authenticate()`` method. The ``authenticate()`` method expects the following
+parameters.
+
+* ``context`` - Keystone's request context
+* ``auth_payload`` - the content of the authentication for a given method
+* ``auth_context`` - user authentication context, a dictionary shared by all
+  plugins. It contains ``method_names`` and ``extras`` by default.
+  ``method_names`` is a list and ``extras`` is a dictionary.
+
+If successful, the ``authenticate()`` method must provide a valid ``user_id``
+in ``auth_context`` and return ``None``. ``method_names`` is used to convey any
+additional authentication methods in case authentication is for re-scoping. For
+example, if the authentication is for re-scoping, a plugin must append the
+previous method names into ``method_names``. Also, a plugin may add any
+additional information into ``extras``. Anything in ``extras`` will be conveyed
+in the token's ``extras`` field.
+
+If authentication requires multiple steps, the ``authenticate()`` method must
+return the payload in the form of a dictionary for the next authentication
+step.
+
+If authentication is unsuccessful, the ``authenticate()`` method must raise a
+``keystone.exception.Unauthorized`` exception.
+
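+Putting the contract together, a minimal single-step plugin might look like
+the following sketch. The class name, payload keys and secret check are
+hypothetical; only the base class, the method signature and the
+success/failure behavior follow the contract described above.
+
+.. code-block:: python
+
+    from keystone import exception
+    from keystone.auth import core
+
+
+    class SharedSecretAuth(core.AuthMethodHandler):
+        """Hypothetical plugin that authenticates via a pre-shared secret."""
+
+        def authenticate(self, context, auth_payload, auth_context):
+            if auth_payload.get('secret') != 'expected-secret':
+                # failure: raise Unauthorized
+                raise exception.Unauthorized()
+            # success: provide a valid user_id and return None
+            auth_context['user_id'] = auth_payload['user_id']
+            return None
+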
+Simply add the new plugin name to the ``methods`` list along with your plugin
+class configuration in the ``[auth]`` sections of the configuration file to
+deploy it.
+
+If the plugin requires additional configurations, it may register its own
+section in the configuration file.
+
+Plugins are invoked in the order in which they are specified in the ``methods``
+attribute of the ``authentication`` request body. If multiple plugins are
+invoked, all plugins must succeed in order for the entire authentication to
+be successful. Furthermore, all the plugins invoked must agree on the
+``user_id`` in the ``auth_context``.
+
+The ``REMOTE_USER`` environment variable is only set from a containing
+webserver. However, to ensure that a user must go through other authentication
+mechanisms, even if this variable is set, remove ``external`` from the list of
+plugins specified in ``methods``. This effectively disables external
+authentication. For more details, refer to :doc:`External Authentication
+<external-auth>`.
+
+
+Token Persistence Driver
+------------------------
+
+Keystone supports customizable token persistence drivers. These can be
+specified in the ``[token]`` section of the configuration file. Keystone
+provides three non-test persistence backends. These can be set with the
+``[token]\driver`` configuration option.
+
+The drivers Keystone provides are:
+
+* ``keystone.token.persistence.backends.memcache_pool.Token`` - The pooled
+  memcached token persistence engine. This backend supports the concept of
+  pooled memcache client object (allowing for the re-use of the client
+  objects). This backend has a number of extra tunable options in the
+  ``[memcache]`` section of the config.
+
+* ``keystone.token.persistence.backends.sql.Token`` - The SQL-based (default)
+  token persistence engine.
+
+* ``keystone.token.persistence.backends.memcache.Token`` - The memcached based
+  token persistence backend. This backend relies on ``dogpile.cache`` and
+  stores the token data in a set of memcached servers. The server URLs are
+  specified in the ``[memcache]\servers`` configuration option in the Keystone
+  config.
+
+
+.. WARNING::
+    It is recommended you use the
+    ``keystone.token.persistence.backends.memcache_pool.Token`` backend instead
+    of ``keystone.token.persistence.backends.memcache.Token`` as the token
+    persistence driver if you are deploying Keystone under eventlet instead of
+    Apache + mod_wsgi. This recommendation is due to known issues with the use
+    of ``thread.local`` under eventlet that can allow the leaking of memcache
+    client objects and consumption of extra sockets.
+
+
+Token Provider
+--------------
+
+Keystone supports a customizable token provider, specified in the ``[token]``
+section of the configuration file. Keystone provides both UUID and PKI token
+providers. However, users may register their own token provider by configuring
+the following property.
+
+* ``provider`` - token provider driver. Defaults to
+  ``keystone.token.providers.uuid.Provider``
+
+
+UUID, PKI, PKIZ, or Fernet?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Each token format uses different technologies to achieve various performance,
+scaling and architectural requirements.
+
+UUID tokens contain randomly generated UUID4 payloads that are issued and
+validated by the identity service. They are encoded using their hex digest for
+transport and are thus URL-friendly. They must be persisted by the identity
+service in order to be later validated. Revoking them is simply a matter of
+deleting them from the token persistence backend.
+
+Both PKI and PKIZ tokens contain JSON payloads that represent the entire token
+validation response that would normally be retrieved from keystone. The payload
+is then signed using `Cryptographic Message Syntax (CMS)
+<http://en.wikipedia.org/wiki/Cryptographic_Message_Syntax>`_. The combination
+of CMS and the exhaustive payload allows PKI and PKIZ tokens to be verified
+offline using keystone's public signing key. The only reason for them to be
+persisted by the identity service is to later build token revocation *lists*
+(explicit lists of tokens that have been revoked), otherwise they are
+theoretically ephemeral when supported by token revocation *events* (which
+describe invalidated tokens rather than enumerate them). PKIZ tokens add zlib
+compression after signing to achieve a smaller overall token size. To make them
+URL-friendly, PKI tokens are base64 encoded and then arbitrarily manipulated to
+replace unsafe characters with safe ones whereas PKIZ tokens use conventional
+base64url encoding. Due to the size of the payload and the overhead incurred by
+the CMS format, both PKI and PKIZ tokens may be too long to fit in either
+headers or URLs if they contain extensive service catalogs or other additional
+attributes. Some third-party applications such as web servers and clients may
+need to be recompiled from source to customize the limitations that PKI and
+PKIZ tokens would otherwise exceed. Both PKI and PKIZ tokens require signing
+certificates which may be created using ``keystone-manage pki_setup`` for
+demonstration purposes (this is not recommended for production deployments: use
+certificates issued by a trusted CA instead).
+
+Fernet tokens contain a limited amount of identity and authorization data in a
+`MessagePack <http://msgpack.org/>`_ payload. The payload is then wrapped as
+a `Fernet <https://github.com/fernet/spec>`_ message for transport, where
+Fernet provides the required web safe characteristics for use in URLs and
+headers. Fernet tokens require symmetric encryption keys which can be
+established using ``keystone-manage fernet_setup`` and periodically rotated
+using ``keystone-manage fernet_rotate``.
+
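+As a standalone illustration of the Fernet primitive this token format builds
+on (this is not Keystone's provider code), using the `cryptography
+<https://cryptography.io/>`_ library:
+
+.. code-block:: python
+
+    from cryptography.fernet import Fernet
+
+    key = Fernet.generate_key()  # cf. keystone-manage fernet_setup
+    f = Fernet(key)
+
+    # the resulting token is URL- and header-safe base64url text
+    token = f.encrypt(b'token payload')
+    assert f.decrypt(token) == b'token payload'
+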
+.. WARNING::
+    UUID, PKI, PKIZ, and Fernet tokens are all bearer tokens, meaning that they
+    must be protected from unnecessary disclosure to prevent unauthorized
+    access.
+
+Caching Layer
+-------------
+
+Keystone supports a caching layer that is above the configurable subsystems
+(e.g. ``token``, ``identity``, etc). Keystone uses the `dogpile.cache`_ library
+which allows for flexible cache backends. The majority of the caching
+configuration options are set in the ``[cache]`` section. However, each section
+that has the capability to be cached usually has a ``caching`` boolean value
+that will toggle caching for that specific section. The current default
+behavior is that subsystem caching is enabled, but the global toggle is set to
+disabled.
+
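+As a standalone illustration of how `dogpile.cache`_ regions behave (this is
+not Keystone's internal wiring), a region caches the results of a decorated
+function until the expiration time passes:
+
+.. code-block:: python
+
+    from dogpile.cache import make_region
+
+    region = make_region().configure(
+        'dogpile.cache.memory',  # for experimentation only, see warning below
+        expiration_time=60,
+    )
+
+    @region.cache_on_arguments()
+    def get_project(project_id):
+        # stand-in for an expensive backend lookup
+        return {'id': project_id, 'name': 'customer-x'}
+
+    get_project('1')  # miss: computed and stored in the region
+    get_project('1')  # hit: served from the cache until expiration
+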
+``[cache]`` configuration section:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* ``enabled`` - enables/disables caching across all of keystone
+* ``debug_cache_backend`` - enables more in-depth logging from the cache
+  backend (get, set, delete, etc)
+* ``backend`` - the caching backend module to use e.g.
+  ``dogpile.cache.memcached``
+
+    .. NOTE::
+        A given ``backend`` must be registered with ``dogpile.cache`` before it
+        can be used. The default backend is the ``Keystone`` no-op backend
+        (``keystone.common.cache.noop``). If caching is desired, a different
+        backend will need to be specified. Current functional backends are:
+
+    * ``dogpile.cache.memcached`` - Memcached backend using the standard
+      `python-memcached`_ library
+    * ``dogpile.cache.pylibmc`` - Memcached backend using the `pylibmc`_
+      library
+    * ``dogpile.cache.bmemcached`` - Memcached using `python-binary-memcached`_
+      library.
+    * ``dogpile.cache.redis`` - `Redis`_ backend
+    * ``dogpile.cache.dbm`` - local DBM file backend
+    * ``dogpile.cache.memory`` - in-memory cache
+    * ``keystone.cache.mongo`` - MongoDB as caching backend
+    * ``keystone.cache.memcache_pool`` - An eventlet safe implementation of
+      ``dogpile.cache.memcached``. This implementation also provides client
+      connection re-use.
+
+        .. WARNING::
+            ``dogpile.cache.memory`` is not suitable for use outside of unit
+            testing as it does not cleanup its internal cache on cache
+            expiration, does not provide isolation to the cached data (values
+            in the store can be inadvertently changed without extra layers of
+            data protection added), and does not share cache between processes.
+            This means that caching and cache invalidation will not be
+            consistent or reliable when using ``Keystone`` and the
+            ``dogpile.cache.memory`` backend under any real workload.
+
+        .. WARNING::
+            Do not use ``dogpile.cache.memcached`` backend if you are deploying
+            Keystone under eventlet. There are known issues with the use of
+            ``thread.local`` under eventlet that can allow the leaking of
+            memcache client objects and consumption of extra sockets.
+
+* ``expiration_time`` - int, the default length of time to cache a specific
+  value. A value of ``0`` indicates to not cache anything. It is recommended
+  that the ``enabled`` option be used to disable cache instead of setting this
+  to ``0``.
+* ``backend_argument`` - an argument passed to the backend when it is
+  instantiated. ``backend_argument`` should be specified once per argument to
+  be passed to the backend, in the format ``<argument name>:<argument value>``,
+  e.g.: ``backend_argument = host:localhost``
+* ``proxies`` - comma delimited list of `ProxyBackends`_ e.g.
+  ``my.example.Proxy, my.example.Proxy2``
+
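+Putting these options together, a minimal working configuration might look
+like the following sketch. It assumes a memcached instance listening on
+``127.0.0.1:11211``; the expiration time is illustrative, not a
+recommendation:
+
+.. code-block:: ini
+
+    [cache]
+    enabled = True
+    backend = dogpile.cache.memcached
+    backend_argument = url:127.0.0.1:11211
+    expiration_time = 600
+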
+Current Keystone systems that have caching capabilities:
+    * ``token``
+        The token system has a separate ``cache_time`` configuration option,
+        that can be set to a value above or below the global
+        ``expiration_time`` default, allowing for different caching behavior
+        from the other systems in ``Keystone``. This option is set in the
+        ``[token]`` section of the configuration file.
+
+        The Token Revocation List cache time is handled by the configuration
+        option ``revocation_cache_time`` in the ``[token]`` section. The
+        revocation list is refreshed whenever a token is revoked. It typically
+        sees significantly more requests than specific token retrievals or
+        token validation calls.
+    * ``resource``
+        The resource system has a separate ``cache_time`` configuration option,
+        that can be set to a value above or below the global
+        ``expiration_time`` default, allowing for different caching behavior
+        from the other systems in ``Keystone``. This option is set in the
+        ``[resource]`` section of the configuration file.
+
+        Currently ``resource`` has caching for ``project`` and ``domain``
+        specific requests (primarily around the CRUD actions).  The
+        ``list_projects`` and ``list_domains`` methods are not subject to
+        caching.
+
+        .. WARNING::
+            Be aware that if a read-only ``resource`` backend is in use, the
+            cache will not immediately reflect changes on the back end.  Any
+            given change may take up to the ``cache_time`` (if set in the
+            ``[resource]`` section of the configuration) or the global
+            ``expiration_time`` (set in the ``[cache]`` section of the
+            configuration) before it is reflected. If this type of delay (when
+            using a read-only ``resource`` backend) is an issue, it is
+            recommended that caching be disabled on ``resource``. To disable
+            caching specifically on ``resource``, in the ``[resource]`` section
+            of the configuration set ``caching`` to ``False``.
+    * ``role``
+        Currently ``role`` has caching for ``get_role``, but not for ``list_roles``.
+        The role system has a separate ``cache_time`` configuration option,
+        that can be set to a value above or below the global ``expiration_time``
+        default, allowing for different caching behavior from the other systems in
+        ``Keystone``.  This option is set in the ``[role]`` section of the
+        configuration file.
+
+        .. WARNING::
+            Be aware that if a read-only ``role`` backend is in use, the cache
+            will not immediately reflect changes on the back end.  Any given change
+            may take up to the ``cache_time`` (if set in the ``[role]``
+            section of the configuration) or the global ``expiration_time`` (set in
+            the ``[cache]`` section of the configuration) before it is reflected.
+            If this type of delay (when using a read-only ``role`` backend) is
+            an issue, it is recommended that caching be disabled on ``role``.
+            To disable caching specifically on ``role``, in the ``[role]``
+            section of the configuration set ``caching`` to ``False``.
+
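+For example, a deployment might combine the global and per-subsystem toggles
+as in the following sketch (the values are illustrative only):
+
+.. code-block:: ini
+
+    [token]
+    caching = True
+    cache_time = 300
+
+    [resource]
+    caching = False
+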
+For more information about the different backends (and configuration options):
+    * `dogpile.cache.backends.memory`_
+    * `dogpile.cache.backends.memcached`_
+    * `dogpile.cache.backends.redis`_
+    * `dogpile.cache.backends.file`_
+    * :py:mod:`keystone.common.cache.backends.mongo`
+
+.. _`dogpile.cache`: http://dogpilecache.readthedocs.org/en/latest/
+.. _`python-memcached`: http://www.tummy.com/software/python-memcached/
+.. _`pylibmc`: http://sendapatch.se/projects/pylibmc/index.html
+.. _`python-binary-memcached`: https://github.com/jaysonsantos/python-binary-memcached
+.. _`Redis`: http://redis.io/
+.. _`dogpile.cache.backends.memory`: http://dogpilecache.readthedocs.org/en/latest/api.html#memory-backend
+.. _`dogpile.cache.backends.memcached`: http://dogpilecache.readthedocs.org/en/latest/api.html#memcached-backends
+.. _`dogpile.cache.backends.redis`: http://dogpilecache.readthedocs.org/en/latest/api.html#redis-backends
+.. _`dogpile.cache.backends.file`: http://dogpilecache.readthedocs.org/en/latest/api.html#file-backends
+.. _`ProxyBackends`: http://dogpilecache.readthedocs.org/en/latest/api.html#proxy-backends
+.. _`PyMongo API`: http://api.mongodb.org/python/current/api/pymongo/index.html
+
+
+Certificates for PKI
+--------------------
+
+PKI stands for Public Key Infrastructure. Tokens are documents
+cryptographically signed using the X.509 standard. In order to work correctly,
+token generation requires a public/private key pair. The public key must be
+signed in an X.509 certificate, and the certificate used to sign it must be
+available as a Certificate Authority (CA) certificate. These files can be
+either externally generated or generated using the ``keystone-manage``
+utility.
+
+The files used for signing and verifying certificates are set in the Keystone
+configuration file. The private key should only be readable by the system user
+that will run Keystone. The values that specify the certificates are under the
+``[signing]`` section of the configuration file. The configuration values are:
+
+* ``certfile`` - Location of certificate used to verify tokens. Default is
+  ``/etc/keystone/ssl/certs/signing_cert.pem``
+* ``keyfile`` - Location of private key used to sign tokens. Default is
+  ``/etc/keystone/ssl/private/signing_key.pem``
+* ``ca_certs`` - Location of certificate for the authority that issued the
+  above certificate. Default is ``/etc/keystone/ssl/certs/ca.pem``
+
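+For instance, an explicit ``[signing]`` section using the default locations
+listed above would look like:
+
+.. code-block:: ini
+
+    [signing]
+    certfile = /etc/keystone/ssl/certs/signing_cert.pem
+    keyfile = /etc/keystone/ssl/private/signing_key.pem
+    ca_certs = /etc/keystone/ssl/certs/ca.pem
+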
+Signing Certificate Issued by External CA
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You may use a signing certificate issued by an external CA instead of one
+generated by ``keystone-manage``. However, a certificate issued by an external
+CA must satisfy the following conditions:
+
+* all certificate and key files must be in Privacy Enhanced Mail (PEM) format
+* private key files must not be protected by a password
+
+The basic workflow for using a signing certificate issued by an external CA
+involves:
+
+1. `Request Signing Certificate from External CA`_
+2. Convert certificate and private key to PEM if needed
+3. `Install External Signing Certificate`_
+
+
+Request Signing Certificate from External CA
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+One way to request a signing certificate from an external CA is to first
+generate a PKCS #10 certificate signing request (CSR) using the OpenSSL CLI.
+
+First create a certificate request configuration file (e.g. ``cert_req.conf``):
+
+.. code-block:: ini
+
+    [ req ]
+    default_bits            = 2048
+    default_keyfile         = keystonekey.pem
+    default_md              = default
+
+    prompt                  = no
+    distinguished_name      = distinguished_name
+
+    [ distinguished_name ]
+    countryName             = US
+    stateOrProvinceName     = CA
+    localityName            = Sunnyvale
+    organizationName        = OpenStack
+    organizationalUnitName  = Keystone
+    commonName              = Keystone Signing
+    emailAddress            = keystone@openstack.org
+
+Then generate a CSR with the OpenSSL CLI. **Do not encrypt the generated
+private key. The -nodes option must be used.**
+
+For example:
+
+.. code-block:: bash
+
+    $ openssl req -newkey rsa:2048 -keyout signing_key.pem -keyform PEM -out signing_cert_req.pem -outform PEM -config cert_req.conf -nodes
+
+
+If everything is successful, you should end up with ``signing_cert_req.pem``
+and ``signing_key.pem``. Send ``signing_cert_req.pem`` to your CA to request a
+token signing certificate, and make sure to ask for the certificate to be
+issued in PEM format. Also, make sure your trusted CA certificate chain is in
+PEM format.
+
+
+Install External Signing Certificate
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Assuming you have the following already:
+
+* ``signing_cert.pem`` - (Keystone token) signing certificate in PEM format
+* ``signing_key.pem`` - corresponding (non-encrypted) private key in PEM format
+* ``cacert.pem`` - trusted CA certificate chain in PEM format
+
+Copy the above to your certificate directory. For example:
+
+.. code-block:: bash
+
+    $ mkdir -p /etc/keystone/ssl/certs
+    $ cp signing_cert.pem /etc/keystone/ssl/certs/
+    $ cp signing_key.pem /etc/keystone/ssl/certs/
+    $ cp cacert.pem /etc/keystone/ssl/certs/
+    $ chmod -R 700 /etc/keystone/ssl/certs
+
+**Make sure the certificate directory is root-protected.**
+
+If your certificate directory path is different from the default
+``/etc/keystone/ssl/certs``, make sure it is reflected in the ``[signing]``
+section of the configuration file.
+
+
+Generating a Signing Certificate using pki_setup
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``keystone-manage pki_setup`` is a development tool. We recommend that you do
+not use ``keystone-manage pki_setup`` in a production environment. In
+production, an external CA should be used instead. This is because the CA
+secret key should generally be kept apart from the token signing secret keys so
+that a compromise of a node does not lead to an attacker being able to generate
+valid signed Keystone tokens. This is a low probability attack vector, as
+compromise of a Keystone service machine's filesystem security almost certainly
+means the attacker will be able to gain direct access to the token backend.
+
+When using ``keystone-manage pki_setup`` to generate the certificates, the
+following configuration options in the ``[signing]`` section are used:
+
+* ``ca_key`` - Default is ``/etc/keystone/ssl/private/cakey.pem``
+* ``key_size`` - Default is ``2048``
+* ``valid_days`` - Default is ``3650``
+
+If ``keystone-manage pki_setup`` is not used, these options do not need to be
+set.
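+
+For reference, a development-only bootstrap might look like the following
+sketch (the ``--keystone-user`` and ``--keystone-group`` flags set ownership
+of the generated files and assume Keystone runs as the ``keystone`` system
+user and group):
+
+.. code-block:: bash
+
+    $ keystone-manage pki_setup --keystone-user keystone --keystone-group keystone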
+
+
+Encryption Keys for Fernet
+--------------------------
+
+``keystone-manage fernet_setup`` will attempt to create a key repository as
+configured in the ``[fernet_tokens]`` section of ``keystone.conf`` and
+bootstrap it with encryption keys.
+
+A single 256-bit key is actually composed of two smaller keys: a 128-bit key
+used for SHA256 HMAC signing and a 128-bit key used for AES encryption. See the
+`Fernet token <https://github.com/fernet/spec>`_ specification for more detail.
+
+``keystone-manage fernet_rotate`` will rotate encryption keys through the
+following states:
+
+* **Staged key**: In a key rotation, a new key is introduced into the rotation
+  in this state. Only one key is considered to be the *staged* key at any given
+  time. This key will become the *primary* during the *next* key rotation. This
+  key is only used to validate tokens and serves to avoid race conditions in
+  multi-node deployments (all nodes should recognize all *primary* keys in the
+  deployment at all times). In a multi-node Keystone deployment this would
+  allow for the *staged* key to be replicated to all Keystone nodes before
+  being promoted to *primary* on a single node. This prevents the case where a
+  *primary* key is created on one Keystone node and tokens encrypted/signed
+  with that new *primary* are rejected on another Keystone node because the
+  new *primary* doesn't exist there yet.
+
+* **Primary key**: In a key rotation, the old *staged* key is promoted to be
+  the *primary*. Only one key is considered to be the *primary* key at any
+  given time. This is the key used to generate new tokens. This key is also
+  used to validate previously generated tokens.
+
+* **Secondary keys**: In a key rotation, the old *primary* key is demoted to be
+  a *secondary* key. *Secondary* keys are only used to validate previously
+  generated tokens. You can maintain any number of *secondary* keys, up to
+  ``[fernet_tokens] max_active_keys`` (where "active" refers to the sum of all
+  recognized keys in any state: *staged*, *primary* or *secondary*). When
+  ``max_active_keys`` is exceeded during a key rotation, the oldest keys are
+  discarded.
+
+When a new primary key is created, all new tokens will be encrypted using the
+new primary key. The old primary key is demoted to a secondary key, which can
+still be used for validating tokens. Excess secondary keys (beyond
+``[fernet_tokens] max_active_keys``) are revoked. Revoked keys are permanently
+deleted.
+
+Rotating keys too frequently, or with ``[fernet_tokens] max_active_keys`` set
+too low, will cause tokens to become invalid prior to their expiration.
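+
+As a sketch, assuming the default key repository location of
+``/etc/keystone/fernet-keys`` and a ``keystone`` system user and group, a
+bootstrap and first rotation might look like:
+
+.. code-block:: bash
+
+    # create the key repository and bootstrap it with encryption keys
+    $ keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
+    $ ls /etc/keystone/fernet-keys
+    0  1
+
+    # promote the staged key; key 0 is always the staged slot and the
+    # highest-numbered key is the primary
+    $ keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone
+    $ ls /etc/keystone/fernet-keys
+    0  1  2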
+
+Service Catalog
+---------------
+
+Keystone provides two configuration options for your service catalog.
+
+SQL-based Service Catalog (``sql.Catalog``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A dynamic database-backed driver fully supporting persistent configuration.
+
+``keystone.conf`` example:
+
+.. code-block:: ini
+
+    [catalog]
+    driver = keystone.catalog.backends.sql.Catalog
+
+.. NOTE::
+
+    A ``template_file`` does not need to be defined for the ``sql.Catalog``
+    driver.
+
+To build your service catalog using this driver, see the built-in help:
+
+.. code-block:: bash
+
+    $ openstack --help
+    $ openstack help service create
+    $ openstack help endpoint create
+
+You can also refer to `an example in Keystone (tools/sample_data.sh)
+<https://github.com/openstack/keystone/blob/master/tools/sample_data.sh>`_.
+
+File-based Service Catalog (``templated.Catalog``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The templated catalog is an in-memory backend initialized from a read-only
+``template_file``. Choose this option only if you know that your service
+catalog will not change very much over time.
+
+.. NOTE::
+
+    Attempting to change your service catalog against this driver will result
+    in ``HTTP 501 Not Implemented`` errors. This is the expected behavior. If
+    you want to use these commands, you must instead use the SQL-based Service
+    Catalog driver.
+
+``keystone.conf`` example:
+
+.. code-block:: ini
+
+    [catalog]
+    driver = keystone.catalog.backends.templated.Catalog
+    template_file = /opt/stack/keystone/etc/default_catalog.templates
+
+The value of ``template_file`` is expected to be an absolute path to your
+service catalog configuration. An example ``template_file`` is included in
+Keystone; however, you should create your own to reflect your deployment.
+
+Another such example is `available in devstack
+(files/default_catalog.templates)
+<https://github.com/openstack-dev/devstack/blob/master/files/default_catalog.templates>`_.
+
+Logging
+-------
+
+Logging is configured externally to the rest of Keystone. Configure the path to
+your logging configuration file using the ``[DEFAULT] log_config`` option of
+``keystone.conf``. If you wish to route all your logging through syslog, set
+the ``[DEFAULT] use_syslog`` option.
+
+A sample ``log_config`` file is included with the project at
+``etc/logging.conf.sample``. Like other OpenStack projects, Keystone uses the
+`Python logging module`, which includes extensive configuration options for
+choosing the output levels and formats.
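+
+As a minimal sketch (the ``log_config`` path here is an assumption; adjust it
+for your deployment):
+
+.. code-block:: ini
+
+    [DEFAULT]
+    log_config = /etc/keystone/logging.conf
+    use_syslog = False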
+
+.. _Paste: http://pythonpaste.org/
+.. _`Python logging module`: http://docs.python.org/library/logging.html
+
+SSL
+---
+
+Keystone may be configured to support SSL and 2-way SSL out-of-the-box. The
+X.509 certificates used by Keystone can be generated by ``keystone-manage``
+or obtained externally and configured for use with Keystone as described in
+this section, which also describes each certificate and its purpose.
+
+.. WARNING::
+
+    The SSL configuration options available to the eventlet server
+    (``keystone-all``) described here are severely limited. A secure
+    deployment should have Keystone running in a web server (such as Apache
+    HTTPd), or behind an SSL terminator. When running Keystone in a web server
+    or behind an SSL terminator the options described in this section have no
+    effect and SSL is configured in the web server or SSL terminator.
+
+Types of certificates
+^^^^^^^^^^^^^^^^^^^^^
+
+* ``cacert.pem``: Certificate Authority chain to validate against.
+* ``ssl_cert.pem``: Public certificate for Keystone server.
+* ``middleware.pem``: Public and private certificate for Keystone
+  middleware/client.
+* ``cakey.pem``: Private key for the CA.
+* ``ssl_key.pem``: Private key for the Keystone server.
+
+Note that you may choose whatever names you want for these certificates, or
+combine the public/private keys in the same file if you wish. These
+certificates are just provided as an example.
+
+Configuration
+^^^^^^^^^^^^^
+
+To enable SSL modify the ``etc/keystone.conf`` file under the ``[ssl]`` and
+``[eventlet_server_ssl]`` sections. The following is an SSL configuration
+example using the included sample certificates:
+
+.. code-block:: ini
+
+    [eventlet_server_ssl]
+    enable = True
+    certfile = <path to keystone.pem>
+    keyfile = <path to keystonekey.pem>
+    ca_certs = <path to ca.pem>
+    cert_required = False
+
+    [ssl]
+    ca_key = <path to cakey.pem>
+    key_size = 1024
+    valid_days=3650
+    cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+* ``enable``: True enables SSL. Defaults to False.
+* ``certfile``: Path to Keystone public certificate file.
+* ``keyfile``: Path to Keystone private certificate file. If the private key is
+  included in the certfile, the keyfile may be omitted.
+* ``ca_certs``: Path to CA trust chain.
+* ``cert_required``: Requires client certificate. Defaults to False.
+
+When generating SSL certificates, the following values are read:
+
+* ``key_size``: Key size to create. Defaults to 1024.
+* ``valid_days``: How long the certificate is valid for. Defaults to 3650
+  (10 years).
+* ``ca_key``: The private key for the CA. Defaults to
+  ``/etc/keystone/ssl/certs/cakey.pem``.
+* ``cert_subject``: The subject to set in the certificate. Defaults to
+  ``/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost``. When setting the subject it
+  is important to set CN to be the address of the server so client validation
+  will succeed. This generally means having the subject be at least
+  ``/CN=<keystone ip>``.
+
+Generating SSL certificates
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Certificates for encrypted HTTP communication can be generated by:
+
+.. code-block:: bash
+
+    $ keystone-manage ssl_setup
+
+This will create a private key, a public key, and a certificate that will be
+used to encrypt communications with Keystone. In the event that a Certificate
+Authority is not given, a testing one will be created.
+
+In a production environment it is likely that these certificates will be
+created and provided externally. Note that ``ssl_setup`` is a development tool
+and is only recommended for development environments. We do not recommend
+using ``ssl_setup`` for production environments.
+
+
+User CRUD extension for the V2.0 API
+------------------------------------
+
+.. NOTE::
+
+    The core V3 API includes user operations so no extension needs to be
+    enabled for the V3 API.
+
+For the V2.0 API, Keystone provides a user CRUD filter that can be added to
+the public_api pipeline. This user CRUD filter allows users to use an HTTP
+PATCH to change their own password. To enable this extension you should define
+a user_crud_extension filter, insert it after the ``*_body`` middleware and
+before the ``public_service`` app in the public_api WSGI pipeline in
+``keystone-paste.ini``, e.g.:
+
+.. code-block:: ini
+
+    [filter:user_crud_extension]
+    paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
+
+    [pipeline:public_api]
+    pipeline = url_normalize token_auth admin_token_auth json_body debug ec2_extension user_crud_extension public_service
+
+Each user can then change their own password with an HTTP PATCH:
+
+.. code-block:: bash
+
+    $ curl -X PATCH http://localhost:5000/v2.0/OS-KSCRUD/users/<userid> -H "Content-type: application/json" \
+    -H "X_Auth_Token: <authtokenid>" -d '{"user": {"password": "ABCD", "original_password": "DCBA"}}'
+
+In addition to changing the user's password, this call revokes all of the
+user's existing tokens.
+
+
+Inherited Role Assignment Extension
+-----------------------------------
+
+Keystone provides an optional extension that adds the capability to assign
+roles on a project or domain that, rather than affect the project or domain
+itself, are instead inherited to the project subtree or to all projects owned
+by that domain. This extension is disabled by default, but can be enabled by
+including the following in ``keystone.conf``:
+
+.. code-block:: ini
+
+    [os_inherit]
+    enabled = True
+
+
+Token Binding
+-------------
+
+Token binding refers to the practice of embedding information from external
+authentication providers (like a company's Kerberos server) inside the token
+such that a client may enforce that the token only be used in conjunction with
+that specified authentication. This is an additional security mechanism as it
+means that if a token is stolen it will not be usable without also providing
+the external authentication.
+
+To activate token binding you must specify the types of authentication that
+token binding should be used for in ``keystone.conf`` e.g.:
+
+.. code-block:: ini
+
+    [token]
+    bind = kerberos
+
+Currently only ``kerberos`` is supported.
+
+To enforce checking of token binding the ``enforce_token_bind`` parameter
+should be set to one of the following modes:
+
+* ``disabled`` disable token bind checking
+* ``permissive`` enable bind checking, if a token is bound to a mechanism that
+  is unknown to the server then ignore it. This is the default.
+* ``strict`` enable bind checking, if a token is bound to a mechanism that is
+  unknown to the server then this token should be rejected.
+* ``required`` enable bind checking and require that at least 1 bind mechanism
+  is used for tokens.
+* *named* enable bind checking and require that the specified authentication
+  mechanism is used, e.g.:
+
+  .. code-block:: ini
+
+    [token]
+    enforce_token_bind = kerberos
+
+  *Do not* set ``enforce_token_bind = named`` as there is not an authentication
+  mechanism called ``named``.
+
+Limiting the number of entities returned in a collection
+--------------------------------------------------------
+
+Keystone provides a method of setting a limit to the number of entities
+returned in a collection, which is useful to prevent overly long response times
+for list queries that have not specified a sufficiently narrow filter. This
+limit can be set globally by setting ``list_limit`` in the default section of
+``keystone.conf``, with no limit set by default. Individual driver sections may
+override this global value with a specific limit, for example:
+
+.. code-block:: ini
+
+    [resource]
+    list_limit = 100
+
+If a response to a ``list_{entity}`` call has been truncated, then the
+response status code will still be 200 (OK), but the ``truncated`` attribute
+in the collection will be set to ``true``.
+
+Sample Configuration Files
+--------------------------
+
+The ``etc/`` folder distributed with Keystone contains example configuration
+files for each Server application.
+
+* ``etc/keystone.conf.sample``
+* ``etc/keystone-paste.ini``
+* ``etc/logging.conf.sample``
+* ``etc/default_catalog.templates``
+
+.. _`API protection with RBAC`:
+
+Keystone API protection with Role Based Access Control (RBAC)
+=============================================================
+
+Like most OpenStack projects, Keystone supports the protection of its APIs by
+defining policy rules based on an RBAC approach. These are stored in a JSON
+policy file, the name and location of which is set in the main Keystone
+configuration file.
+
+Each Keystone v3 API has a line in the policy file which dictates what level of
+protection is applied to it, where each line is of the form::
+
+  <api name>: <rule statement> or <match statement>
+
+where:
+
+``<rule statement>`` can recursively contain a ``<rule statement>`` or a
+``<match statement>``
+
+``<match statement>`` is a set of identifiers that must match between the token
+provided by the caller of the API and the parameters or target entities of the
+API call in question. For example:
+
+.. code-block:: javascript
+
+    "identity:create_user": "role:admin and domain_id:%(user.domain_id)s"
+
+This indicates that to create a user you must have the admin role in your
+token; in addition, the domain_id in your token (which implies this must be a
+domain-scoped token) must match the domain_id in the user object you are
+trying to create. In other words, you must have the admin role on the domain
+in which you are creating the user, and the token you are using must be
+scoped to that domain.
+
+Each component of a match statement is of the form::
+
+  <attribute from token>:<constant> or <attribute related to API call>
+
+The following attributes are available:
+
+* Attributes from token: user_id, the domain_id or project_id depending on
+  the scope, and the list of roles you have within that scope
+
+* Attributes related to API call: Any parameters that are passed into the API
+  call are available, along with any filters specified in the query string.
+  Attributes of objects passed can be referenced using an object.attribute
+  syntax (e.g. user.domain_id). The target objects of an API are also available
+  using a target.object.attribute syntax. For instance:
+
+  .. code-block:: javascript
+
+    "identity:delete_user": "role:admin and domain_id:%(target.user.domain_id)s"
+
+  would ensure that the user object that is being deleted is in the same
+  domain as the token provided.
+
+Every target object has an ``id`` and a ``name`` available as
+``target.<object>.id`` and ``target.<object>.name``. Other attributes are
+retrieved from the database and vary between object types. Moreover, some
+database fields are filtered out (e.g. user passwords).
+
+List of object attributes:
+
+* role:
+    * target.role.id
+    * target.role.name
+
+* user:
+    * target.user.default_project_id
+    * target.user.description
+    * target.user.domain_id
+    * target.user.enabled
+    * target.user.id
+    * target.user.name
+
+* group:
+    * target.group.description
+    * target.group.domain_id
+    * target.group.id
+    * target.group.name
+
+* domain:
+    * target.domain.enabled
+    * target.domain.id
+    * target.domain.name
+
+* project:
+    * target.project.description
+    * target.project.domain_id
+    * target.project.enabled
+    * target.project.id
+    * target.project.name
+
+The default ``policy.json`` file supplied provides a somewhat basic example of
+API protection, and does not assume any particular use of domains. For
+multi-domain installations where, for example, a cloud provider wishes to
+allow administration of the contents of a domain to be delegated, it is
+recommended that the supplied ``policy.v3cloudsample.json`` is used as a basis
+for creating a suitable production policy file. This example policy file also
+shows the use of an admin_domain to allow a cloud provider to enable cloud
+administrators to have wider access across the APIs.
+
+A clean installation would perhaps need to start with the standard policy
+file, to allow creation of the admin_domain with the first users within it.
+The domain_id of the admin domain would then be obtained and could be pasted
+into a modified version of ``policy.v3cloudsample.json``, which could then be
+enabled as the main policy file.
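+
+To illustrate the grammar described above, a fragment of a hypothetical
+policy file might look like the following, where ``admin_required`` is a
+named rule referenced by the API entries:
+
+.. code-block:: javascript
+
+    {
+        "admin_required": "role:admin",
+        "identity:create_user": "rule:admin_required and domain_id:%(user.domain_id)s",
+        "identity:delete_user": "rule:admin_required and domain_id:%(target.user.domain_id)s"
+    }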
+
+.. _`prepare your deployment`:
+
+Preparing your deployment
+=========================
+
+Step 1: Configure keystone.conf
+-------------------------------
+
+Ensure that your ``keystone.conf`` is configured to use a SQL driver:
+
+.. code-block:: ini
+
+    [identity]
+    driver = keystone.identity.backends.sql.Identity
+
+You may also want to configure your ``[database]`` settings to better reflect
+your environment:
+
+.. code-block:: ini
+
+    [database]
+    connection = sqlite:///keystone.db
+    idle_timeout = 200
+
+.. NOTE::
+
+    It is important that the database that you specify be different from the
+    one containing your existing install.
+
+Step 2: Sync your new, empty database
+-------------------------------------
+
+You should now be ready to initialize your new database without error, using:
+
+.. code-block:: bash
+
+    $ keystone-manage db_sync
+
+To test this, you should now be able to start ``keystone-all`` and use the
+OpenStack Client to list your projects (which should successfully return an
+empty list from your new database):
+
+.. code-block:: bash
+
+    $ openstack --os-token ADMIN --os-url http://127.0.0.1:35357/v2.0/ project list
+
+.. NOTE::
+
+    We're providing the default OS_TOKEN and OS_URL values from
+    ``keystone.conf`` to connect to the Keystone service. If you changed those
+    values, or deployed Keystone to a different endpoint, you will need to
+    change the provided command accordingly.
+
+Initializing Keystone
+=====================
+
+``keystone-manage`` is designed to execute commands that cannot be performed
+through the normal REST API. At the moment, the following calls are supported:
+
+* ``db_sync``: Sync the database.
+* ``db_version``: Print the current migration version of the database.
+* ``mapping_purge``: Purge the identity mapping table.
+* ``pki_setup``: Initialize the certificates used to sign tokens.
+* ``saml_idp_metadata``: Generate identity provider metadata.
+* ``ssl_setup``: Generate certificates for SSL.
+* ``token_flush``: Purge expired tokens.
+
+Invoking ``keystone-manage`` by itself will give you additional usage
+information.
+
+The private key used for token signing can only be read by its owner. This
+prevents unauthorized users from spuriously signing tokens.
+``keystone-manage pki_setup`` should be run as the same system user that will
+be running the Keystone service to ensure proper ownership of the private key
+file and the associated certificates.
+
+Adding Users, Projects, and Roles via Command Line Interfaces
+=============================================================
+
+Keystone APIs are protected by the rules in the policy file. The default policy
+rules require admin credentials to administer ``users``, ``projects``, and
+``roles``. See section
+`Keystone API protection with Role Based Access Control (RBAC)`_ for more
+details on policy files.
+
+The Keystone command line interface packaged in `python-keystoneclient`_ only
+supports the Identity v2.0 API. The OpenStack common command line interface
+packaged in `python-openstackclient`_ supports both v2.0 and v3 APIs.
+
+With both command line interfaces there are two ways to configure the client to
+use admin credentials, using either an existing token or password credentials.
+
+.. NOTE::
+
+    As of the Juno release, it is recommended to use
+    ``python-openstackclient``, as it supports both v2.0 and v3 APIs. For the
+    purpose of backwards compatibility, the CLI packaged in
+    ``python-keystoneclient`` is not being removed.
+
+.. _`python-openstackclient`: http://docs.openstack.org/developer/python-openstackclient/
+.. _`python-keystoneclient`: http://docs.openstack.org/developer/python-keystoneclient/
+
+Authenticating with a Token
+---------------------------
+
+.. NOTE::
+
+    If your Keystone deployment is brand new, you will need to use this
+    authentication method, along with your ``[DEFAULT] admin_token``.
+
+To authenticate with Keystone using a token and ``python-openstackclient``, set
+the following flags.
+
+* ``--os-url OS_URL``: Keystone endpoint the user communicates with
+* ``--os-token OS_TOKEN``: User's service token
+
+To administer a Keystone endpoint, your token should either belong to a user
+with the ``admin`` role, or, if you haven't created one yet, be equal to the
+value defined by ``[DEFAULT] admin_token`` in your ``keystone.conf``.
+
+You can also set these variables in your environment so that they do not need
+to be passed as arguments each time:
+
+.. code-block:: bash
+
+    $ export OS_URL=http://localhost:35357/v2.0
+    $ export OS_TOKEN=ADMIN
+
+Instead of ``python-openstackclient``, if using ``python-keystoneclient``, set
+the following:
+
+* ``--os-endpoint OS_SERVICE_ENDPOINT``: equivalent to ``--os-url OS_URL``
+* ``--os-service-token OS_SERVICE_TOKEN``: equivalent to
+  ``--os-token OS_TOKEN``
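+
+For example, mirroring the token-authentication environment variables above:
+
+.. code-block:: bash
+
+    $ export OS_SERVICE_ENDPOINT=http://localhost:35357/v2.0
+    $ export OS_SERVICE_TOKEN=ADMIN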
+
+
+Authenticating with a Password
+------------------------------
+
+To authenticate with Keystone using a password and ``python-openstackclient``,
+set the following flags. Note that the user referenced below should be granted
+the ``admin`` role.
+
+* ``--os-username OS_USERNAME``: Name of your user
+* ``--os-password OS_PASSWORD``: Password for your user
+* ``--os-project-name OS_PROJECT_NAME``: Name of your project
+* ``--os-auth-url OS_AUTH_URL``: URL of the Keystone authentication server
+
+You can also set these variables in your environment so that they do not need
+to be passed as arguments each time:
+
+.. code-block:: bash
+
+    $ export OS_USERNAME=my_username
+    $ export OS_PASSWORD=my_password
+    $ export OS_PROJECT_NAME=my_project
+    $ export OS_AUTH_URL=http://localhost:35357/v2.0
+
+If using ``python-keystoneclient``, set the following instead:
+
+* ``--os-tenant-name OS_TENANT_NAME``: equivalent to
+  ``--os-project-name OS_PROJECT_NAME``
+
+
+Example usage
+-------------
+
+``python-openstackclient`` is set up to expect commands in the general form of:
+
+.. code-block:: bash
+
+  $ openstack [<global-options>] <object-1> <action> [<object-2>] [<command-arguments>]
+
+For example, the commands ``user list`` and ``project create`` can be invoked
+as follows:
+
+.. code-block:: bash
+
+    # Using token authentication, with environment variables
+    $ export OS_URL=http://127.0.0.1:35357/v2.0/
+    $ export OS_TOKEN=secrete_token
+    $ openstack user list
+    $ openstack project create demo
+
+    # Using token authentication, with flags
+    $ openstack --os-token=secrete --os-url=http://127.0.0.1:35357/v2.0/ user list
+    $ openstack --os-token=secrete --os-url=http://127.0.0.1:35357/v2.0/ project create demo
+
+    # Using password authentication, with environment variables
+    $ export OS_USERNAME=admin
+    $ export OS_PASSWORD=secrete
+    $ export OS_PROJECT_NAME=admin
+    $ export OS_AUTH_URL=http://localhost:35357/v2.0
+    $ openstack user list
+    $ openstack project create demo
+
+    # Using password authentication, with flags
+    $ openstack --os-username=admin --os-password=secrete --os-project-name=admin --os-auth-url=http://localhost:35357/v2.0 user list
+    $ openstack --os-username=admin --os-password=secrete --os-project-name=admin --os-auth-url=http://localhost:35357/v2.0 project create demo
+
+For additional examples using ``python-keystoneclient`` refer to
+`python-keystoneclient examples`_, likewise, for additional examples using
+``python-openstackclient``, refer to `python-openstackclient examples`_.
+
+.. _`python-keystoneclient examples`: cli_examples.html#using-python-keystoneclient-v2-0
+.. _`python-openstackclient examples`: cli_examples.html#using-python-openstackclient-v3
+
+
+Removing Expired Tokens
+=======================
+
+In the SQL backend, expired tokens are not automatically removed. These tokens
+can be removed with:
+
+.. code-block:: bash
+
+    $ keystone-manage token_flush
+
+The memcache backend automatically discards expired tokens, so flushing is
+unnecessary; if attempted, it will fail with a ``NotImplemented`` error.
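+
+Because expired tokens accumulate over time, deployments commonly schedule
+this command periodically. A sketch (the user and schedule are assumptions):
+
+.. code-block:: bash
+
+    # edit the keystone user's crontab
+    $ crontab -u keystone -e
+    # then add an entry such as the following to flush tokens hourly:
+    # 0 * * * * keystone-manage token_flush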
+
+
+Configuring the LDAP Identity Provider
+======================================
+
+As an alternative to the SQL database backing store, Keystone can use a
+directory server to provide the Identity service. An example schema for
+OpenStack would look like this::
+
+  dn: dc=openstack,dc=org
+  dc: openstack
+  objectClass: dcObject
+  objectClass: organizationalUnit
+  ou: openstack
+
+  dn: ou=Projects,dc=openstack,dc=org
+  objectClass: top
+  objectClass: organizationalUnit
+  ou: projects
+
+  dn: ou=Users,dc=openstack,dc=org
+  objectClass: top
+  objectClass: organizationalUnit
+  ou: users
+
+  dn: ou=Roles,dc=openstack,dc=org
+  objectClass: top
+  objectClass: organizationalUnit
+  ou: roles
+
+The corresponding entries in the Keystone configuration file are:
+
+.. code-block:: ini
+
+  [ldap]
+  url = ldap://localhost
+  user = dc=Manager,dc=openstack,dc=org
+  password = badpassword
+  suffix = dc=openstack,dc=org
+  use_dumb_member = False
+  allow_subtree_delete = False
+
+  user_tree_dn = ou=Users,dc=openstack,dc=org
+  user_objectclass = inetOrgPerson
+
+  project_tree_dn = ou=Projects,dc=openstack,dc=org
+  project_objectclass = groupOfNames
+
+  role_tree_dn = ou=Roles,dc=openstack,dc=org
+  role_objectclass = organizationalRole
+
+The default object classes and attributes are intentionally simplistic. They
+reflect the common standard objects according to the LDAP RFCs. However, in a
+live deployment, the correct attributes can be overridden to support a
+preexisting, more complex schema. For example, in the user object, the
+objectClass posixAccount from RFC2307 is very common. If this is the underlying
+objectclass, then the *uid* field should probably be *uidNumber*, and the
+*username* field should be either *uid* or *cn*. To change these two fields,
+the corresponding entries in the Keystone configuration file are:
+
+.. code-block:: ini
+
+  [ldap]
+  user_id_attribute = uidNumber
+  user_name_attribute = cn
+
+
+There is a set of allowed actions per object type that you can modify
+depending on your specific deployment. For example, if the users are managed
+by another tool and you have only read access to them, the configuration is:
+
+.. code-block:: ini
+
+  [ldap]
+  user_allow_create = False
+  user_allow_update = False
+  user_allow_delete = False
+
+  project_allow_create = True
+  project_allow_update = True
+  project_allow_delete = True
+
+  role_allow_create = True
+  role_allow_update = True
+  role_allow_delete = True
+
+There are some configuration options for filtering users, tenants, and roles
+if the backend is providing too much output; in that case the configuration
+will look like:
+
+.. code-block:: ini
+
+  [ldap]
+  user_filter = (memberof=CN=openstack-users,OU=workgroups,DC=openstack,DC=org)
+  project_filter =
+  role_filter =
+
+If the directory server does not provide a boolean-type enabled attribute for
+users, there are several configuration parameters that can be used to extract
+the value from an integer attribute, as in Active Directory:
+
+.. code-block:: ini
+
+  [ldap]
+  user_enabled_attribute = userAccountControl
+  user_enabled_mask      = 2
+  user_enabled_default   = 512
+
+In this case the attribute is an integer and the enabled flag is stored in
+bit 1. If the configured *user_enabled_mask* is different from 0, Keystone
+gets the value from the field *user_enabled_attribute*, performs a bitwise
+AND with the value indicated by *user_enabled_mask*, and if the result
+matches the mask then the account is disabled. For example, an Active
+Directory value of 514 (512 + 2) ANDed with a mask of 2 yields 2, so the
+account is considered disabled.
+
+Keystone also saves the value without the mask applied to the user identity
+in the attribute *enabled_nomask*. This is needed in order to set the value
+back when enabling or disabling a user, because the attribute contains more
+information than just the enabled status, such as password expiration.
+Finally, the *user_enabled_default* setting is needed in order to create a
+default value for the integer attribute (512 = NORMAL ACCOUNT in AD).
+
+In the case of Active Directory, the classes and attributes may not match the
+classes specified in the LDAP module, so you can configure them like:
+
+.. code-block:: ini
+
+  [ldap]
+  user_objectclass          = person
+  user_id_attribute         = cn
+  user_name_attribute       = cn
+  user_mail_attribute       = mail
+  user_enabled_attribute    = userAccountControl
+  user_enabled_mask         = 2
+  user_enabled_default      = 512
+  user_attribute_ignore     = tenant_id,tenants
+  project_objectclass       = groupOfNames
+  project_id_attribute      = cn
+  project_member_attribute  = member
+  project_name_attribute    = ou
+  project_desc_attribute    = description
+  project_enabled_attribute = extensionName
+  project_attribute_ignore  =
+  role_objectclass          = organizationalRole
+  role_id_attribute         = cn
+  role_name_attribute       = ou
+  role_member_attribute     = roleOccupant
+  role_attribute_ignore     =
+
+Debugging LDAP
+--------------
+
+For additional information on LDAP connections, performance (such as slow
+response time), or field mappings, set ``debug_level`` in the ``[ldap]``
+section to enable debugging:
+
+.. code-block:: ini
+
+  [ldap]
+  debug_level = 4095
+
+This setting in turn sets ``OPT_DEBUG_LEVEL`` in the underlying Python library.
+This field is a bit mask (integer), and the possible flags are documented in
+the OpenLDAP manpages. Commonly used values include 255 and 4095, with 4095
+being more verbose.
+
+.. WARNING::
+  Enabling ``debug_level`` will negatively impact performance.
+
+Enabled Emulation
+-----------------
+
+Some directory servers do not provide any enabled attribute. For these servers,
+the ``user_enabled_emulation`` and ``project_enabled_emulation`` attributes
+have been created. They are enabled by setting their respective flags to True.
+Then the attributes ``user_enabled_emulation_dn`` and
+``project_enabled_emulation_dn`` may be set to specify how the enabled users
+and projects (tenants) are selected. These attributes work by using a
+``groupOfNames`` and adding whichever users or projects (tenants) that you want
+enabled to the respective group. For example, this will mark any user who is a
+member of ``enabled_users`` as enabled:
+
+.. code-block:: ini
+
+  [ldap]
+  user_enabled_emulation = True
+  user_enabled_emulation_dn = cn=enabled_users,cn=groups,dc=openstack,dc=org
+
+The default values for the user and project (tenant) enabled emulation DNs
+are ``cn=enabled_users,$user_tree_dn`` and
+``cn=enabled_tenants,$project_tree_dn`` respectively.
+
+Secure Connection
+-----------------
+
+If you are using a directory server to provide the Identity service, it is
+strongly recommended that you utilize a secure connection from Keystone to the
+directory server. In addition to plain LDAP, Keystone also provides Transport
+Layer Security (TLS) support. There are some basic configuration
+options for enabling TLS, identifying a single file or directory that contains
+certificates for all the Certificate Authorities that the Keystone LDAP client
+will recognize, and declaring what checks the client should perform on server
+certificates. This functionality can easily be configured as follows:
+
+.. code-block:: ini
+
+  [ldap]
+  use_tls = True
+  tls_cacertfile = /etc/keystone/ssl/certs/cacert.pem
+  tls_cacertdir = /etc/keystone/ssl/certs/
+  tls_req_cert = demand
+
+A few points are worth mentioning regarding the above options. If both
+``tls_cacertfile`` and ``tls_cacertdir`` are set, then ``tls_cacertfile`` will
+be used and ``tls_cacertdir`` is ignored. Furthermore, valid options for
+``tls_req_cert`` are ``demand``, ``never``, and ``allow``. These correspond to
+the standard options permitted by the ``TLS_REQCERT`` TLS option.
+
+Read Only LDAP
+--------------
+
+Many environments typically have user and group information in directories that
+are accessible by LDAP. This information is for read-only use in a wide array
+of applications. Prior to the Havana release, we could not deploy Keystone with
+read-only directories as backends because Keystone also needed to store
+information such as projects, roles, domains and role assignments into the
+directories in conjunction with reading user and group information.
+
+Keystone now provides an option whereby these read-only directories can be
+easily integrated: it enables the identity entities (which comprise users,
+groups, and group memberships) to be served out of directories while the
+resource (which comprises projects and domains), assignment, and role
+entities are served from different Keystone backends (e.g. SQL). To enable
+this option, you must have the following ``keystone.conf`` options set:
+
+.. code-block:: ini
+
+  [identity]
+  driver = keystone.identity.backends.ldap.Identity
+
+  [resource]
+  driver = keystone.resource.backends.sql.Resource
+
+  [assignment]
+  driver = keystone.assignment.backends.sql.Assignment
+
+  [role]
+  driver = keystone.assignment.role_backends.sql.Role
+
+With the above configuration, Keystone will only look up identity-related
+information such as users, groups, and group membership from the directory,
+while resource, role, and assignment related information will be provided by
+the SQL backend. Also note that if there is an LDAP Identity backend, and no
+resource, assignment or role backend is specified, they will default to LDAP.
+Although this may seem counterintuitive, it is provided for backwards
+compatibility. Nonetheless, the explicit option will always override the
+implicit option, so specifying the options as shown above will always be
+correct. Finally, it is also worth noting that whether or not the LDAP
+accessible directory is to be considered read-only is still configured as
+described in the previous section, by setting values such as the following in
+the ``[ldap]`` configuration section:
+
+.. code-block:: ini
+
+  [ldap]
+  user_allow_create = False
+  user_allow_update = False
+  user_allow_delete = False
+
+.. NOTE::
+
+    While having identity-related information backed by LDAP while other
+    information is backed by SQL is a supported configuration, as shown above,
+    the opposite is not true. If either the resource or assignment driver is
+    configured for LDAP, then Identity must also be configured for LDAP.
+
+Connection Pooling
+------------------
+
+Various LDAP backends in Keystone use a common LDAP module to interact with
+LDAP data. By default, a new connection is established for each LDAP
+operation. This can become very expensive when TLS support is enabled, which
+is a likely configuration in an enterprise setup. Re-using connectors from a
+connection pool drastically reduces the overhead of initiating a new
+connection for every LDAP operation.
+
+Keystone now provides connection pool support via configuration. This keeps
+LDAP connectors alive and re-uses them for subsequent LDAP operations. The
+connection lifespan is configurable, along with other pooling-specific
+attributes. The change is made in the LDAP handler layer logic, which is
+primarily responsible for LDAP connections and shared common operations.
+
+In the LDAP identity driver, Keystone authenticates an end user by an LDAP
+bind with the user's DN and provided password. These kinds of auth binds can
+fill up the pool pretty quickly, so a separate pool is provided for end user
+auth bind calls. If a deployment does not want to use a pool for those binds,
+it can disable pooling selectively by setting ``use_auth_pool`` to false. If a
+deployment wants to use a pool for those auth binds, then ``use_auth_pool``
+needs to be true. For the auth pool, a different pool size
+(``auth_pool_size``) and connection lifetime
+(``auth_pool_connection_lifetime``) can be specified. With the auth pool
+enabled, its connection lifetime should be kept short so that the pool
+frequently re-binds the connection with the provided credentials and works
+reliably when an end user's password changes. When ``use_pool`` is false
+(disabled), the auth pool configuration is also not used.
+
+Connection pool configuration is added in ``[ldap]`` configuration section:
+
+.. code-block:: ini
+
+  [ldap]
+  # Enable LDAP connection pooling. (boolean value)
+  use_pool=false
+
+  # Connection pool size. (integer value)
+  pool_size=10
+
+  # Maximum count of reconnect trials. (integer value)
+  pool_retry_max=3
+
+  # Time span in seconds to wait between two reconnect trials.
+  # (floating point value)
+  pool_retry_delay=0.1
+
+  # Connector timeout in seconds. Value -1 indicates indefinite wait for
+  # response. (integer value)
+  pool_connection_timeout=-1
+
+  # Connection lifetime in seconds. (integer value)
+  pool_connection_lifetime=600
+
+  # Enable LDAP connection pooling for end user authentication. If use_pool
+  # is disabled, then this setting is meaningless and is not used at all.
+  # (boolean value)
+  use_auth_pool=false
+
+  # End user auth connection pool size. (integer value)
+  auth_pool_size=100
+
+  # End user auth connection lifetime in seconds. (integer value)
+  auth_pool_connection_lifetime=60
+
diff --git a/keystone-moon/doc/source/configure_federation.rst b/keystone-moon/doc/source/configure_federation.rst
new file mode 100644 (file)
index 0000000..2da5f82
--- /dev/null
@@ -0,0 +1,336 @@
+..
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not
+    use this file except in compliance with the License. You may obtain a copy
+    of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+    License for the specific language governing permissions and limitations
+    under the License.
+
+===================================
+Configuring Keystone for Federation
+===================================
+
+-----------
+Definitions
+-----------
+* `Service Provider (SP)`: provides a service to an end-user.
+* `Identity Provider (IdP)`: service that stores information about users and
+  groups.
+* `SAML assertion`: contains information about a user as provided by an IdP.
+
+-----------------------------------
+Keystone as a Service Provider (SP)
+-----------------------------------
+
+.. NOTE::
+
+    This feature is considered stable and supported as of the Juno release.
+
+Prerequisites
+-------------
+
+This approach to federation supports Keystone as a Service Provider, consuming
+identity properties issued by an external Identity Provider, such as SAML
+assertions or OpenID Connect claims.
+
+Federated users are not mirrored in the Keystone identity backend
+(for example, using the SQL driver). The external Identity Provider is
+responsible for authenticating users, and communicates the result of
+authentication to Keystone using identity properties. Keystone maps these
+values to Keystone user groups and assignments created in Keystone.
+
+The following configuration steps were performed on a machine running
+Ubuntu 12.04 and Apache 2.2.22.
+
+To enable federation, you'll need to:
+
+1. Run Keystone under Apache, rather than using ``keystone-all``.
+2. Configure Apache to use a federation capable authentication method.
+3. Enable ``OS-FEDERATION`` extension.
+
+Configure Apache to use a federation capable authentication method
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are many ways to configure Federation in the Apache HTTPD server.
+Using Shibboleth and OpenID Connect are documented so far.
+
+* To use Shibboleth, follow the steps outlined at: `Setup Shibboleth`_.
+* To use OpenID Connect, follow the steps outlined at: `Setup OpenID Connect`_.
+
+.. _`Setup Shibboleth`: extensions/shibboleth.html
+.. _`Setup OpenID Connect`: extensions/openidc.html
+
+Enable the ``OS-FEDERATION`` extension
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Follow the steps outlined at: `Enabling Federation Extension`_.
+
+.. _`Enabling Federation Extension`: extensions/federation.html
+
+Configuring Federation
+----------------------
+
+Now that the Identity Provider and Keystone are communicating we can start to
+configure the ``OS-FEDERATION`` extension.
+
+1. Add local Keystone groups and roles
+2. Add Identity Provider(s), Mapping(s), and Protocol(s)
+
+Create Keystone groups and assign roles
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As mentioned earlier, no new users will be added to the Identity backend, but
+the Identity Service requires group-based role assignments to authorize
+federated users. The federation mapping function will map the user into local
+Identity Service groups objects, and hence to local role assignments.
+
+Thus, it is required to create the necessary Identity Service groups that
+correspond to the Identity Provider's groups; additionally, these groups should
+be assigned roles on one or more projects or domains.
+
+You may be interested in more information on `group management
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3.html#create-group>`_
+and `role assignments
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3.html#grant-role-to-group-on-project>`_,
+both of which are exposed to the CLI via `python-openstackclient
+<https://pypi.python.org/pypi/python-openstackclient/>`_.
+
+Add Identity Provider(s), Mapping(s), and Protocol(s)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To utilize federation the following must be created in the Identity Service:
+
+* Identity Provider
+* Mapping
+* Protocol
+
+More information on ``OS-FEDERATION`` can be found `here
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html>`__.
+
+~~~~~~~~~~~~~~~~~
+Identity Provider
+~~~~~~~~~~~~~~~~~
+
+Create an Identity Provider object in Keystone, which represents the Identity
+Provider we will use to authenticate end users.
+
+More information on identity providers can be found `here
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html#register-an-identity-provider>`__.
+
+~~~~~~~
+Mapping
+~~~~~~~
+A mapping is a list of rules. The only Identity API objects that will support
+mapping are groups and users.
+
+Mapping adds a set of rules to map federation protocol attributes to Identity API objects.
+An Identity Provider has exactly one mapping specified per protocol.
+
+Mapping objects can be used multiple times by different combinations of Identity Provider and Protocol.
+
+More information on mapping can be found `here
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html#create-a-mapping>`__.
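+
+For example, a minimal mapping with a single rule, mapping the ``REMOTE_USER``
+attribute to a local user name and a pre-created group (the mapping ID
+``saml_mapping`` and ``<group_id>`` below are placeholders), might be created
+with:
+
+.. code-block:: bash
+
+    $ curl -s -X PUT \
+      -H "X-Auth-Token: $OS_TOKEN" \
+      -H "Content-Type: application/json" \
+      -d '{"mapping": {"rules": [{"local": [{"user": {"name": "{0}"}}, {"group": {"id": "<group_id>"}}], "remote": [{"type": "REMOTE_USER"}]}]}}' \
+      http://localhost:5000/v3/OS-FEDERATION/mappings/saml_mapping | python -mjson.tool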
+
+~~~~~~~~
+Protocol
+~~~~~~~~
+
+A protocol contains information that dictates which Mapping rules to use for an incoming
+request made by an IdP. An IdP may have multiple supported protocols.
+
+Add `Protocol object
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html#add-a-protocol-and-attribute-mapping-to-an-identity-provider>`__ and specify the mapping id
+you want to use with the combination of the IdP and Protocol.
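+
+For example (reusing the placeholder IdP and mapping IDs from the sketches
+above), associating the ``saml2`` protocol and a mapping with the IdP might
+look like:
+
+.. code-block:: bash
+
+    $ curl -s -X PUT \
+      -H "X-Auth-Token: $OS_TOKEN" \
+      -H "Content-Type: application/json" \
+      -d '{"protocol": {"mapping_id": "saml_mapping"}}' \
+      http://localhost:5000/v3/OS-FEDERATION/identity_providers/testshib/protocols/saml2 | python -mjson.tool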
+
+Performing federated authentication
+-----------------------------------
+
+1. Authenticate externally and generate an unscoped token in Keystone
+2. Determine accessible resources
+3. Get a scoped token
+
+Get an unscoped token
+~~~~~~~~~~~~~~~~~~~~~
+
+Unlike other authentication methods in the Identity Service, the user does not
+issue an HTTP POST request with authentication data in the request body. To
+start federated authentication, a user must access a dedicated, protected URL
+that encodes the Identity Provider's and Protocol's identifiers. The URL has
+the format:
+``/v3/OS-FEDERATION/identity_providers/{identity_provider}/protocols/{protocol}/auth``.
+
+In this instance we follow a standard SAML2 authentication procedure, that is,
+the user will be redirected to the Identity Provider's authentication webpage
+and be prompted for credentials. After successfully authenticating the user
+will be redirected to the Service Provider's endpoint. If using a web browser,
+a token will be returned in XML format.
+
+The returned unscoped token will include a list of the Identity Service
+groups to which the user belongs.
+
+More information on getting an unscoped token can be found `here
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html#authenticating>`__.
+
+~~~~~~~~~~~~
+Example cURL
+~~~~~~~~~~~~
+
+Note that the request does not include a body. The following URL would be
+considered protected by ``mod_shib`` and Apache; as such, a request made
+to the URL would be redirected to the Identity Provider to start the
+SAML authentication procedure.
+
+.. code-block:: bash
+
+    $ curl -X GET -D - http://localhost:5000/v3/OS-FEDERATION/identity_providers/{identity_provider}/protocols/{protocol}/auth
+
+Determine accessible resources
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By using the previously returned token, the user can issue requests to list
+the projects and domains that are accessible.
+
+* List projects a federated user can access: ``GET /OS-FEDERATION/projects``
+* List domains a federated user can access: ``GET /OS-FEDERATION/domains``
+
+More information on listing resources can be found `here
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html#listing-projects-and-domains>`__.
+
+~~~~~~~~~~~~
+Example cURL
+~~~~~~~~~~~~
+
+.. code-block:: bash
+
+    $ curl -X GET -H "X-Auth-Token: <unscoped token>" http://localhost:5000/v3/OS-FEDERATION/projects
+
+or
+
+.. code-block:: bash
+
+    $ curl -X GET -H "X-Auth-Token: <unscoped token>" http://localhost:5000/v3/OS-FEDERATION/domains
+
+Get a scoped token
+~~~~~~~~~~~~~~~~~~
+
+A federated user may request a scoped token by using the unscoped token. A
+project or domain may be specified by either ``id`` or ``name``. An ``id`` is
+sufficient to uniquely identify a project or domain.
+
+More information on getting a scoped token can be found `here
+<http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html#request-a-scoped-os-federation-token>`__.
+
+~~~~~~~~~~~~
+Example cURL
+~~~~~~~~~~~~
+
+.. code-block:: bash
+
+    $ curl -X POST -H "Content-Type: application/json" -d '{"auth":{"identity":{"methods":["saml2"],"saml2":{"id":"<unscoped_token_id>"}},"scope":{"project":{"domain": {"name": "Default"},"name":"service"}}}}' -D - http://localhost:5000/v3/auth/tokens
+
+--------------------------------------
+Keystone as an Identity Provider (IdP)
+--------------------------------------
+
+.. NOTE::
+
+    This feature is experimental and unsupported in Juno (with several issues
+    that will not be backported). These issues have been fixed and this feature
+    is considered stable and supported as of the Kilo release.
+
+Configuration Options
+---------------------
+
+There are certain settings in ``keystone.conf`` that must be set up prior to
+attempting to federate multiple Keystone deployments.
+
+Within ``keystone.conf``, assign values to the ``[saml]`` related fields, for
+example:
+
+.. code-block:: ini
+
+    [saml]
+    certfile=/etc/keystone/ssl/certs/ca.pem
+    keyfile=/etc/keystone/ssl/private/cakey.pem
+    idp_entity_id=https://keystone.example.com/v3/OS-FEDERATION/saml2/idp
+    idp_sso_endpoint=https://keystone.example.com/v3/OS-FEDERATION/saml2/sso
+    idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml
+
+Though not necessary, the following Organization configuration options should
+also be set up. It is recommended that these values be URL safe.
+
+.. code-block:: ini
+
+    idp_organization_name=example_company
+    idp_organization_display_name=Example Corp.
+    idp_organization_url=example.com
+
+As with the Organization options, the Contact options are not necessary, but
+it is advisable to set these values too.
+
+.. code-block:: ini
+
+    idp_contact_company=example_company
+    idp_contact_name=John
+    idp_contact_surname=Smith
+    idp_contact_email=jsmith@example.com
+    idp_contact_telephone=555-55-5555
+    idp_contact_type=technical
+
+Generate Metadata
+-----------------
+
+In order to create a trust between the IdP and SP, metadata must be exchanged.
+To create metadata for your Keystone IdP, run the ``keystone-manage`` command
+and pipe the output to a file. For example:
+
+.. code-block:: bash
+
+    $ keystone-manage saml_idp_metadata > /etc/keystone/saml2_idp_metadata.xml
+
+.. NOTE::
+    The file location should match the value of the configuration option
+    ``idp_metadata_path`` that was assigned in the previous section.
+
+Create a Service Provider (SP)
+------------------------------
+
+In this example we are creating a new Service Provider with an ID of ``BETA``,
+an ``sp_url`` of ``http://beta.example.com/Shibboleth.sso/POST/ECP`` and an
+``auth_url`` of
+``http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth``.
+The ``sp_url`` will be used when creating a SAML assertion for ``BETA`` and
+signed by the current Keystone IdP. The ``auth_url`` is used to retrieve the
+token for ``BETA`` once the SAML assertion is sent.
+
+.. code-block:: bash
+
+    $ curl -s -X PUT \
+      -H "X-Auth-Token: $OS_TOKEN" \
+      -H "Content-Type: application/json" \
+      -d '{"service_provider": {"auth_url": "http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth", "sp_url": "http://beta.example.com/Shibboleth.sso/POST/ECP"}}' \
+      http://localhost:5000/v3/service_providers/BETA | python -mjson.tool
+
+Testing it all out
+------------------
+
+Lastly, if a scoped token and a Service Provider region are presented to
+Keystone, the result will be a full SAML Assertion, signed by the IdP
+Keystone, specifically intended for the Service Provider Keystone.
+
+.. code-block:: bash
+
+    $ curl -s -X POST \
+      -H "Content-Type: application/json" \
+      -d '{"auth": {"scope": {"service_provider": {"id": "BETA"}}, "identity": {"token": {"id": "d793d935b9c343f783955cf39ee7dc3c"}, "methods": ["token"]}}}' \
+      http://localhost:5000/v3/auth/OS-FEDERATION/saml2
+
+At this point the SAML Assertion can be sent to the Service Provider Keystone
+using the ``auth_url`` provided in the ``X-Auth-Url`` header of the response
+containing the SAML Assertion; a valid OpenStack token, issued by the Service
+Provider Keystone, will then be returned.
+
diff --git a/keystone-moon/doc/source/configuringservices.rst b/keystone-moon/doc/source/configuringservices.rst
new file mode 100644 (file)
index 0000000..3ffa13e
--- /dev/null
@@ -0,0 +1,162 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+==========================================
+Configuring Services to work with Keystone
+==========================================
+
+.. toctree::
+   :maxdepth: 1
+
+Once Keystone is installed and running (see :doc:`configuration`), services
+need to be configured to work with it. To do this, we primarily install and
+configure middleware for the OpenStack service to handle authentication tasks
+or otherwise interact with Keystone.
+
+In general:
+
+* Clients making calls to the service will pass in an authentication token.
+* The Keystone middleware will look for and validate that token, taking the
+  appropriate action.
+* It will also retrieve additional information from the token such as user
+  name, user id, project name, project id, roles, etc...
+
+The middleware will pass that data down to the service as headers. More
+details on the architecture of that setup are described in the
+`authentication middleware documentation`_.
+
+Setting up credentials
+======================
+
+Admin Token
+-----------
+
+For a default installation of Keystone, before you can use the REST API, you
+need to define an authorization token. This is configured in the ``keystone.conf``
+file under the section ``[DEFAULT]``. In the sample file provided with the
+Keystone project, the line defining this token is::
+
+    [DEFAULT]
+    admin_token = ADMIN
+
+A "shared secret" that can be used to bootstrap Keystone. This token does not
+represent a user, and carries no explicit authorization.
+To disable in production (highly recommended), remove AdminTokenAuthMiddleware
+from your paste application pipelines (for example, in keystone-paste.ini)
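+
+For illustration only (the exact set of filters varies between releases), the
+change amounts to deleting ``admin_token_auth`` from each pipeline definition:
+
+.. code-block:: ini
+
+    [pipeline:public_api]
+    # Illustrative pipeline with admin_token_auth removed; keep whichever
+    # other filters your release ships with.
+    pipeline = url_normalize token_auth json_body public_service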
+
+Setting up projects, users, and roles
+-------------------------------------
+
+At a minimum you need to define a project, a user, and a role that links the
+project and user; this is the most basic set of details needed to get other
+services authenticating and authorizing with Keystone.
+
+You will also want to create service users for nova, glance, swift, etc. so
+that those services can authenticate users against Keystone. The
+``auth_token`` middleware supports using either the shared secret described
+above as ``admin_token`` or users for each service.
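+
+As a hedged sketch (option names vary between keystonemiddleware releases, and
+``controller`` and the password are placeholders), a service user is typically
+wired into a service's ``auth_token`` middleware configuration like this:
+
+.. code-block:: ini
+
+    [keystone_authtoken]
+    auth_uri = http://controller:5000/v2.0
+    identity_uri = http://controller:35357
+    admin_tenant_name = service
+    admin_user = nova
+    admin_password = Sekr3tPass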
+
+See :doc:`configuration` for a walk through on how to create projects, users,
+and roles.
+
+Setting up services
+===================
+
+Creating Service Users
+----------------------
+
+To configure the OpenStack services with service users, we need to create
+a project for all the services, and then users for each of the services. We
+then assign those service users an ``admin`` role on the service project. This
+allows them to validate tokens - and to authenticate and authorize other user
+requests.
+
+Create a project for the services, typically named ``service`` (however, the
+name can be whatever you choose):
+
+.. code-block:: bash
+
+    $ openstack project create service
+
+Create service users for ``nova``, ``glance``, ``swift``, and ``neutron``
+(or whatever subset is relevant to your deployment):
+
+.. code-block:: bash
+
+    $ openstack user create nova --password Sekr3tPass --project service
+
+Repeat this for each service you want to enable.
+
+Create an administrative role for the service accounts, typically named
+``admin`` (however the name can be whatever you choose). For adding the
+administrative role to the service accounts, you'll need to know the
+name of the role you want to add. If you don't have it handy, you can look it
+up quickly with:
+
+.. code-block:: bash
+
+    $ openstack role list
+
+Once you have it, grant the administrative role to the service users. This is
+all assuming that you've already created the basic roles and settings as
+described in :doc:`configuration`:
+
+.. code-block:: bash
+
+    $ openstack role add admin --project service --user nova
+
+Defining Services
+-----------------
+
+Keystone also acts as a service catalog to let other OpenStack systems know
+where relevant API endpoints exist for OpenStack Services. The OpenStack
+Dashboard, in particular, uses this heavily - and this **must** be configured
+for the OpenStack Dashboard to properly function.
+
+The endpoints for these services are defined in a template, an example of
+which is in the project as the file ``etc/default_catalog.templates``.
+
+Keystone supports two means of defining the services. One is the catalog
+template, as described above, in which case everything is detailed in that
+template.
+
+The other is a SQL backend for the catalog service, in which case, after
+Keystone is online, you need to add the services to the catalog:
+
+.. code-block:: bash
+
+    $ openstack service create compute --name nova \
+                                       --description "Nova Compute Service"
+    $ openstack service create ec2 --name ec2 \
+                                   --description "EC2 Compatibility Layer"
+    $ openstack service create image --name glance \
+                                     --description "Glance Image Service"
+    $ openstack service create identity --name keystone \
+                                        --description "Keystone Identity Service"
+    $ openstack service create object-store --name swift \
+                                            --description "Swift Service"
+
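+For the catalog to be useful, each service also needs endpoints. A hedged
+example (the URLs, region, and client syntax are illustrative and vary
+between ``python-openstackclient`` releases):
+
+.. code-block:: bash
+
+    $ openstack endpoint create \
+        --publicurl http://controller:8774/v2/%\(tenant_id\)s \
+        --internalurl http://controller:8774/v2/%\(tenant_id\)s \
+        --adminurl http://controller:8774/v2/%\(tenant_id\)s \
+        --region RegionOne \
+        compute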
+
+Setting Up Auth-Token Middleware
+================================
+
+The Keystone project provides the auth-token middleware which validates that
+the request is valid before passing it on to the application. This must be
+installed and configured in the applications (such as Nova, Glance, Swift,
+etc.). The `authentication middleware documentation`_ describes how to install
+and configure this middleware.
+
+.. _`authentication middleware documentation`: http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html
diff --git a/keystone-moon/doc/source/developing.rst b/keystone-moon/doc/source/developing.rst
new file mode 100644 (file)
index 0000000..33b2dd5
--- /dev/null
@@ -0,0 +1,771 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+========================
+Developing with Keystone
+========================
+
+Setup
+-----
+
+Get your development environment set up according to :doc:`setup`. The
+instructions from here will assume that you have installed Keystone into a
+virtualenv. If you chose not to, simply exclude ``tools/with_venv.sh`` from
+the example commands below.
+
+
+Configuring Keystone
+--------------------
+
+Keystone requires a configuration file.  There is a sample configuration file
+that can be used to get started:
+
+.. code-block:: bash
+
+    $ cp etc/keystone.conf.sample etc/keystone.conf
+
+The defaults are enough to get you going, but you can make any changes if
+needed.
+
+
+Running Keystone
+----------------
+
+To run the Keystone Admin and API server instances, use:
+
+.. code-block:: bash
+
+    $ tools/with_venv.sh bin/keystone-all
+
+This runs Keystone with the configuration in the ``etc/`` directory of the project.
+See :doc:`configuration` for details on how Keystone is configured. By default,
+Keystone is configured with SQL backends.
+
+
+Interacting with Keystone
+-------------------------
+
+You can interact with Keystone through the command line using
+:doc:`man/keystone-manage` which allows you to initialize keystone, etc.
+
+You can also interact with Keystone through its REST API. There is a Python
+Keystone client library `python-keystoneclient`_ which interacts exclusively
+through the REST API, and which Keystone itself uses to provide its
+command-line interface.
+
+When initially getting set up, after you've configured which databases to use,
+you're probably going to need to run the following to get your database schema
+in place:
+
+.. code-block:: bash
+
+    $ bin/keystone-manage db_sync
+
+.. _`python-keystoneclient`: https://github.com/openstack/python-keystoneclient
+
+If the above commands result in a ``KeyError``, or they fail on a
+``.pyc`` file with the message, ``You can only have one Python script per
+version``, then it is possible that there are out-of-date compiled Python
+bytecode files in the Keystone directory tree that are causing problems. This
+can occur if you have previously installed and run older versions of Keystone.
+These out-of-date files can be easily removed by running a command like the
+following from the Keystone root project directory:
+
+.. code-block:: bash
+
+    $ find . -name "*.pyc" -delete
+
+Database Schema Migrations
+--------------------------
+
+Keystone uses SQLAlchemy-migrate_ to migrate
+the SQL database between revisions. For core components, the migrations are
+kept in a central repository under ``keystone/common/sql/migrate_repo``.
+
+.. _SQLAlchemy-migrate: http://code.google.com/p/sqlalchemy-migrate/
+
+Extensions should be created as directories under ``keystone/contrib``. An
+extension that requires SQL migrations should not change the common repository,
+but should instead have its own repository. This repository must be in the
+extension's directory in ``keystone/contrib/<extension>/migrate_repo``. In
+addition, it needs a subdirectory named ``versions``. For example, if the
+extension name is ``my_extension`` then the directory structure would be
+``keystone/contrib/my_extension/migrate_repo/versions/``. For the migration to
+work, both the ``migrate_repo`` and ``versions`` subdirectories must have
+``__init__.py`` files. SQLAlchemy-migrate will look for a configuration file in
+the ``migrate_repo`` named ``migrate.cfg``. This conforms to a key/value `ini`
+file format. A sample configuration file with the minimal set of values is::
+
+    [db_settings]
+    repository_id=my_extension
+    version_table=migrate_version
+    required_dbs=[]
+
+The directory ``keystone/contrib/example`` contains a sample extension
+migration.
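+
+To sketch the layout for the hypothetical ``my_extension`` above (the
+``migrate.cfg`` content is the sample shown earlier):
+
+.. code-block:: bash
+
+    $ mkdir -p keystone/contrib/my_extension/migrate_repo/versions
+    $ touch keystone/contrib/my_extension/migrate_repo/__init__.py
+    $ touch keystone/contrib/my_extension/migrate_repo/versions/__init__.py
+    $ $EDITOR keystone/contrib/my_extension/migrate_repo/migrate.cfg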
+
+Migrations must be explicitly run for each extension individually. To run a
+migration for a specific extension, simply run:
+
+.. code-block:: bash
+
+    $ keystone-manage db_sync --extension <name>
+
+Initial Sample Data
+-------------------
+
+There is an included script which is helpful in setting up some initial sample
+data for use with keystone:
+
+.. code-block:: bash
+
+    $ OS_SERVICE_TOKEN=ADMIN tools/with_venv.sh tools/sample_data.sh
+
+Notice it requires a service token read from an environment variable for
+authentication.  The default value "ADMIN" is from the ``admin_token``
+option in the ``[DEFAULT]`` section in ``etc/keystone.conf``.
+
+Once run, you can see the sample data that has been created by using the
+`python-keystoneclient`_ command-line interface:
+
+.. code-block:: bash
+
+    $ tools/with_venv.sh keystone --os-token ADMIN --os-endpoint http://127.0.0.1:35357/v2.0/ user-list
+
+Filtering responsibilities between controllers and drivers
+----------------------------------------------------------
+
+Keystone supports the specification of filtering on list queries as part of the
+v3 identity API. By default these queries are satisfied in the controller
+class when a controller calls the ``wrap_collection`` method at the end of a
+``list_{entity}`` method.  However, to enable optimum performance, any driver
+can implement some or all of the specified filters (for example, by adding
+filtering to the generated SQL statements to generate the list).
+
+The communication of the filter details between the controller level and its
+drivers is handled by passing a reference to a Hints object,
+which is a list of dicts describing the filters. A driver that satisfies a
+filter must delete the filter from the Hints object so that when it is returned
+to the controller level, it knows to only execute any unsatisfied
+filters.
+
+The contract for a driver for ``list_{entity}`` methods is therefore:
+
+* It MUST return a list of entities of the specified type
+* It MAY either just return all such entities, or alternatively reduce the
+  list by filtering for one or more of the specified filters in the passed
+  Hints reference, and removing any such satisfied filters. An exception to
+  this is that for identity drivers that support domains, then they should
+  at least support filtering by domain_id.
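+
+To make the contract concrete, the following is a hedged driver-side sketch;
+the filter dict layout, the ``User`` model, and ``self.session`` are
+illustrative stand-ins rather than Keystone's actual internals:
+
+.. code-block:: python
+
+    def list_users(self, hints):
+        query = self.session.query(User)
+        # Iterate over a copy so filters can be removed while looping.
+        for filt in list(hints.filters):
+            if filt['name'] == 'domain_id' and filt['comparator'] == 'equals':
+                # Satisfy the filter in SQL...
+                query = query.filter_by(domain_id=filt['value'])
+                # ...and remove it so the controller knows it has been handled.
+                hints.filters.remove(filt)
+        return query.all()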
+
+Entity list truncation by drivers
+---------------------------------
+
+Keystone supports the ability for a deployment to restrict the number of
+entries returned from ``list_{entity}`` methods, typically to prevent poorly
+formed searches (e.g. without sufficient filters) from becoming a performance
+issue.
+
+These limits are set in the configuration file, either for a specific driver or
+across all drivers.  These limits are read at the Manager level and passed into
+individual drivers as part of the Hints list object. A driver should try to
+honor any such limit if possible, but if it is unable to do so then it may
+ignore it (and the truncation of the returned list of entities will happen at
+the controller level).
+
+Identity entity ID management between controllers and drivers
+-------------------------------------------------------------
+
+Keystone supports the option of having domain-specific backends for the
+identity driver (i.e. for user and group storage), allowing, for example,
+a different LDAP server for each domain. To ensure that Keystone can determine
+to which backend it should route an API call, starting with Juno, the
+identity manager will, provided that domain-specific backends are enabled,
+build on-the-fly a persistent mapping table between Keystone Public IDs that
+are presented to the controller and the domain that holds the entity, along
+with whatever local ID is understood by the driver.  This hides, for instance,
+the LDAP specifics of whatever ID is being used.
+
+To ensure backward compatibility, the default configuration of either a
+single SQL or LDAP backend for Identity will not use the mapping table,
+meaning that public-facing IDs will be unchanged. If keeping these IDs
+the same for the default LDAP backend is not required, then setting the
+configuration variable ``backward_compatible_ids`` to ``False`` will enable
+the mapping for the default LDAP driver, hence hiding the LDAP specifics of the
+IDs being used.
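+
+A minimal sketch of enabling the mapping for the default LDAP driver,
+assuming the option lives in the ``[identity_mapping]`` section of
+``keystone.conf``:
+
+.. code-block:: ini
+
+    [identity_mapping]
+    backward_compatible_ids = False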
+
+Testing
+-------
+
+Running Tests
+=============
+
+Before running tests, you should have ``tox`` installed and available in your
+environment (in addition to the other external dependencies in :doc:`setup`):
+
+.. code-block:: bash
+
+    $ pip install tox
+
+.. NOTE::
+
+    You may need to perform both the above operation and the next inside a
+    python virtualenv, or prefix the above command with ``sudo``, depending on
+    your preference.
+
+To execute the full suite of tests maintained within Keystone, simply run:
+
+.. code-block:: bash
+
+    $ tox
+
+This iterates over multiple configuration variations, and uses external
+projects to do light integration testing to verify the Identity API against
+other projects.
+
+.. NOTE::
+
+    The first time you run ``tox``, it will take additional time to build
+    virtualenvs. You can later use the ``-r`` option with ``tox`` to rebuild
+    your virtualenv in a similar manner.
+
+To run tests for one or more specific test environments (for example, the most
+common configuration of Python 2.7 and PEP-8), list the environments with the
+``-e`` option, separated by spaces:
+
+.. code-block:: bash
+
+    $ tox -e py27,pep8
+
+See ``tox.ini`` for the full list of available test environments.
+
+Running with PDB
+~~~~~~~~~~~~~~~~
+
+Using PDB breakpoints with tox and testr normally doesn't work since the tests
+just fail with a BdbQuit exception rather than stopping at the breakpoint.
+
+To run with PDB breakpoints during testing, use the ``debug`` tox environment
+rather than ``py27``. Here's an example, passing the name of a test since
+you'll normally only want to run the test that hits your breakpoint:
+
+.. code-block:: bash
+
+    $ tox -e debug keystone.tests.test_auth.AuthWithToken.test_belongs_to
+
+For reference, the ``debug`` tox environment implements the instructions
+here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests
+
+Disabling Stream Capture
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The stdout, stderr and log messages generated during a test are captured and
+in the event of a test failure those streams will be printed to the terminal
+along with the traceback. The data is discarded for passing tests.
+
+Each stream has an environment variable that can be used to force captured
+data to be discarded even if the test fails: ``OS_STDOUT_CAPTURE`` for stdout,
+``OS_STDERR_CAPTURE`` for stderr and ``OS_LOG_CAPTURE`` for logging. If the value
+of the environment variable is not one of (True, true, 1, yes) the stream will
+be discarded. All three variables default to 1.
+
+For example, to discard logging data during a test run:
+
+.. code-block:: bash
+
+    $ OS_LOG_CAPTURE=0 tox -e py27
+
+Test Structure
+==============
+
+Not all of the tests in the tests directory are strictly unit tests. Keystone
+intentionally includes tests that run the service locally and drives the entire
+configuration to achieve basic functional testing.
+
+For the functional tests, an in-memory key-value store is used to keep the
+tests fast.
+
+Within the tests directory, the general structure of the tests is a basic
+set of tests represented under a test class, and then subclasses of those
+tests under other classes with different configurations to drive different
+backends through the APIs.
+
+For example, ``test_backend.py`` has a sequence of tests under the class
+``IdentityTests`` that will work with the default drivers as configured in
+this project's ``etc/`` directory. ``test_backend_sql.py`` subclasses those tests,
+changing the configuration by overriding with configuration files stored in
+the tests directory aimed at enabling the SQL backend for the Identity module.
+
+Likewise, ``test_v2_keystoneclient.py`` takes advantage of the tests written
+against ``KeystoneClientTests`` to verify the same tests function through
+different drivers and releases of the Keystone client.
+
+The class ``CompatTestCase`` does the work of checking out a specific version
+of python-keystoneclient, and then verifying it against a temporarily running
+local instance to explicitly verify basic functional testing across the API.
+
+Testing Schema Migrations
+=========================
+
+The application of schema migrations can be tested using SQLAlchemy-migrate's
+built-in test runner, one migration at a time.
+
+.. WARNING::
+
+    This may leave your database in an inconsistent state; attempt this in non-production environments only!
+
+This is useful for testing the *next* migration in sequence (both forward &
+backward) in a database under version control:
+
+.. code-block:: bash
+
+    $ python keystone/common/sql/migrate_repo/manage.py test \
+    --url=sqlite:///test.db \
+    --repository=keystone/common/sql/migrate_repo/
+
+This command uses a SQLite database (``test.db``). Depending on
+the migration, this command alone does not make assertions as to the integrity
+of your data during migration.
+
+
+Writing Tests
+=============
+
+To add tests covering all drivers, update the relevant base test class
+(``test_backend.py``, ``test_legacy_compat.py``, and
+``test_keystoneclient.py``).
+
+To add new drivers, subclass the ``test_backend.py`` (look towards
+``test_backend_sql.py`` or ``test_backend_kvs.py`` for examples) and update the
+configuration of the test class in ``setUp()``.
+
+
+Further Testing
+===============
+
+devstack_ is the *best* way to quickly deploy Keystone with the rest of the
+OpenStack universe and should be a critical step in your development workflow!
+
+You may also be interested in either the
+`OpenStack Continuous Integration Infrastructure`_ or the
+`OpenStack Integration Testing Project`_.
+
+.. _devstack: http://devstack.org/
+.. _OpenStack Continuous Integration Infrastructure: http://ci.openstack.org
+.. _OpenStack Integration Testing Project: https://github.com/openstack/tempest
+
+
+LDAP Tests
+==========
+
+LDAP has a fake backend that performs rudimentary operations.  If you
+are building more significant LDAP functionality, you should test against
+a live LDAP server.  Devstack has an option to set up a directory server for
+Keystone to use.  Add ``ldap`` to the ``ENABLED_SERVICES`` environment variable,
+and set environment variables ``KEYSTONE_IDENTITY_BACKEND=ldap`` and
+``KEYSTONE_CLEAR_LDAP=yes`` in your ``localrc`` file.
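+
+A minimal ``localrc`` excerpt implementing the settings above (the
+comma-append form for ``ENABLED_SERVICES`` is one common devstack idiom):
+
+.. code-block:: bash
+
+    ENABLED_SERVICES=$ENABLED_SERVICES,ldap
+    KEYSTONE_IDENTITY_BACKEND=ldap
+    KEYSTONE_CLEAR_LDAP=yes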
+
+The unit tests can be run against a live server with
+``keystone/tests/test_ldap_livetest.py`` and
+``keystone/tests/test_ldap_pool_livetest.py``. The default password is ``test``
+but if you have installed devstack with a different LDAP password, modify the
+file ``keystone/tests/config_files/backend_liveldap.conf`` and
+``keystone/tests/config_files/backend_pool_liveldap.conf`` to reflect your password.
+
+.. NOTE::
+    To run the live tests you need to set the environment variable ``ENABLE_LDAP_LIVE_TEST``
+    to a non-negative value.
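+
+For example (mirroring the earlier single-test ``tox`` invocation; adjust the
+test path if it differs in your tree):
+
+.. code-block:: bash
+
+    $ ENABLE_LDAP_LIVE_TEST=1 tox -e py27 keystone.tests.test_ldap_livetest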
+
+
+"Work in progress" Tests
+========================
+
+Work in progress (WIP) tests are very useful in a variety of situations
+including:
+
+* During a TDD process they can be used to add tests to a review while
+  they are not yet working and will not cause test failures. (They should
+  be removed before the final merge.)
+* Often bug reports include small snippets of code to show broken
+  behaviors. Some of these can be converted into WIP tests that can later
+  be worked on by a developer. This allows us to take code that can be
+  used to catch bug regressions and commit it before any code is
+  written.
+
+The ``keystone.tests.util.wip`` decorator can be used to mark a test as
+WIP. A WIP test will always be run. If the test fails then a TestSkipped
+exception is raised because we expect the test to fail. We do not pass
+the test in this case so that it doesn't count toward the number of
+successfully run tests. If the test passes, an AssertionError exception is
+raised so that the developer knows they made the test pass. This is a
+reminder to remove the decorator.
+
+The ``wip`` decorator requires that the author provides a message. This
+message is important because it will tell other developers why this test
+is marked as a work in progress. Reviewers will require that these
+messages are descriptive and accurate.
+
+.. NOTE::
+    The ``wip`` decorator is not a replacement for skipping tests.
+
+.. code-block:: python
+
+    @wip('waiting on bug #000000')
+    def test():
+        pass
+
+
+Generating Updated Sample Config File
+-------------------------------------
+
+Keystone's sample configuration file ``etc/keystone.conf.sample`` is automatically
+generated based upon all of the options available within Keystone. These options
+are sourced from the many files around Keystone as well as some external libraries.
+
+If new options are added, primarily located in ``keystone.common.config``, a new
+sample configuration file needs to be generated. To generate a new sample
+configuration file to be included in a commit, run:
+
+.. code-block:: bash
+
+    $ tox -esample_config -r
+
+The tox command will place an updated sample config in ``etc/keystone.conf.sample``.
+
+If there is a new external library (e.g. ``oslo.messaging``) that utilizes the
+``oslo.config`` package for configuration, it can be added to the list of libraries
+found in ``tools/config/oslo.config.generator.rc``.
+
+
+Translated responses
+--------------------
+
+The Keystone server can provide error responses translated into the language in
+the ``Accept-Language`` header of the request. In order to test this in your
+development environment, there are a couple of things you need to do.
+
+1. Build the message files. Run the following command in your keystone
+   directory:
+
+.. code-block:: bash
+
+   $ python setup.py compile_catalog
+
+This will generate ``.mo`` files like ``keystone/locale/[lang]/LC_MESSAGES/[lang].mo``.
+
+2. When running Keystone, set the ``KEYSTONE_LOCALEDIR`` environment variable
+   to the keystone/locale directory. For example:
+
+.. code-block:: bash
+
+   $ KEYSTONE_LOCALEDIR=/opt/stack/keystone/keystone/locale keystone-all
+
+Now you can get a translated error response:
+
+.. code-block:: bash
+
+    $ curl -s -H "Accept-Language: zh" http://localhost:5000/notapath | python -mjson.tool
+    {
+        "error": {
+            "code": 404,
+            "message": "\u627e\u4e0d\u5230\u8cc7\u6e90\u3002",
+            "title": "Not Found"
+        }
+    }
+
+
+Caching Layer
+-------------
+
+The caching layer is designed to be applied to any ``manager`` object within Keystone
+via the use of the ``on_arguments`` decorator provided in the ``keystone.common.cache``
+module.  This decorator leverages `dogpile.cache`_ caching system to provide a flexible
+caching backend.
+
+It is recommended that each of the managers have an independent toggle within the config
+file to enable caching.  The easiest method to utilize the toggle within the
+configuration file is to define a ``caching`` boolean option within that manager's
+configuration section (e.g. ``identity``).  Once that option is defined you can
+pass a function to the ``on_arguments`` decorator with the named argument ``should_cache_fn``.
+In the ``keystone.common.cache`` module, there is a function called ``should_cache_fn``,
+which will provide a reference to a function that will consult the global cache
+``enabled`` option as well as the specific manager's caching enable toggle.
+
+    .. NOTE::
+        If a section-specific boolean option is not defined in the config section specified when
+        calling ``should_cache_fn``, the returned function reference will default to enabling
+        caching for that ``manager``.
+
+Example use of cache and ``should_cache_fn`` (in this example, ``token`` is the manager):
+
+.. code-block:: python
+
+    from keystone.common import cache
+    SHOULD_CACHE = cache.should_cache_fn('token')
+
+    @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
+    def cacheable_function(arg1, arg2, arg3):
+        ...
+        return some_value
+
+With the above example, each call to the ``cacheable_function`` would check to see if
+the arguments passed to it matched a currently valid cached item.  If the return value
+was cached, the caching layer would return the cached value; if the return value was
+not cached, the caching layer would call the function, pass the value to the ``SHOULD_CACHE``
+function reference, which would then determine if caching was globally enabled and enabled
+for the ``token`` manager.  If either caching toggle is disabled, the value is returned but
+not cached.
+
+It is recommended that each of the managers have an independent configurable time-to-live (TTL).
+If a configurable TTL has been defined for the manager configuration section, it is possible to
+pass it to the ``cache.on_arguments`` decorator with the named-argument ``expiration_time``.  For
+consistency, it is recommended that this option be called ``cache_time`` and default to ``None``.
+If the ``expiration_time`` argument passed to the decorator is set to ``None``, the expiration
+time will be set to the global default (the ``expiration_time`` option in the ``[cache]``
+configuration section).
+
+Example of using a section specific ``cache_time`` (in this example, ``identity`` is the manager):
+
+.. code-block:: python
+
+    from keystone.common import cache
+    SHOULD_CACHE = cache.should_cache_fn('identity')
+
+    @cache.on_arguments(should_cache_fn=SHOULD_CACHE,
+                        expiration_time=CONF.identity.cache_time)
+    def cacheable_function(arg1, arg2, arg3):
+        ...
+        return some_value
+
+For cache invalidation, the ``on_arguments`` decorator will add an ``invalidate`` method
+(attribute) to your decorated function.  To invalidate the cache, you pass the same arguments
+to the ``invalidate`` method as you would the normal function.
+
+Example (using the above cacheable_function):
+
+.. code-block:: python
+
+    def invalidate_cache(arg1, arg2, arg3):
+        cacheable_function.invalidate(arg1, arg2, arg3)
+
+.. WARNING::
+    The ``on_arguments`` decorator does not accept keyword-arguments/named arguments.  An
+    exception will be raised if keyword arguments are passed to a caching-decorated function.
+
+.. NOTE::
+    In all cases methods work the same as functions, except that to invalidate
+    the cache on a decorated bound method, you need to pass ``self`` to the
+    ``invalidate`` method as the first argument, before the other arguments.
+
+.. _`dogpile.cache`: http://dogpilecache.readthedocs.org/
+
+
+dogpile.cache based Key-Value-Store (KVS)
+-----------------------------------------
+The ``dogpile.cache`` based KVS system has been designed to allow for flexible stores for the
+backend of the KVS system. The implementation allows for the use of any normal ``dogpile.cache``
+cache backends to be used as a store. All interfacing to the KVS system happens via the
+``KeyValueStore`` object located at ``keystone.common.kvs.KeyValueStore``.
+
+To utilize the KVS system an instantiation of the ``KeyValueStore`` class is needed. To acquire
+a KeyValueStore instantiation use the ``keystone.common.kvs.get_key_value_store`` factory
+function. This factory will either create a new ``KeyValueStore`` object or retrieve the
+already instantiated ``KeyValueStore`` object by the name passed as an argument. The object must
+be configured before use. The KVS object will only be retrievable with the
+``get_key_value_store`` function while there is an active reference outside of the registry.
+Once all references have been removed the object is gone (the registry uses a ``weakref`` to
+match the object to the name).
+
+Example Instantiation and Configuration:
+
+.. code-block:: python
+
+    kvs_store = kvs.get_key_value_store('TestKVSRegion')
+    kvs_store.configure('openstack.kvs.Memory', ...)
+
+Any keyword arguments passed to the configure method that are not defined as part of the
+KeyValueStore object configuration are passed to the backend for further configuration (e.g.
+memcached servers, lock_timeout, etc).
+
+The memcached backend uses the Keystone manager mechanism to support the use of any of the
+provided memcached backends (``bmemcached``, ``pylibmc``, and basic ``memcached``).
+By default the ``memcached`` backend is used.  Currently the Memcache URLs come from the
+``servers`` option in the ``[memcache]`` configuration section of the Keystone config.
+
+The following is an example showing how to configure the KVS system to use a
+KeyValueStore object named "TestKVSRegion" and a specific Memcached driver:
+
+.. code-block:: python
+
+    kvs_store = kvs.get_key_value_store('TestKVSRegion')
+    kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached')
+
+The memcached backend supports a mechanism to supply an explicit TTL (in seconds) to all keys
+set via the KVS object. This is accomplished by passing the argument ``memcached_expire_time``
+as a keyword argument to the ``configure`` method. Passing the ``memcached_expire_time`` argument
+will cause the ``time`` argument to be added to all ``set`` and ``set_multi`` calls performed by
+the memcached client. ``memcached_expire_time`` is an argument exclusive to the memcached dogpile
+backend, and will be ignored if passed to another backend:
+
+.. code-block:: python
+
+    kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached',
+                        memcached_expire_time=86400)
+
+If an explicit TTL is configured via the ``memcached_expire_time`` argument, it is possible to
+exempt specific keys from receiving the TTL by passing the argument ``no_expiry_keys`` (list)
+as a keyword argument to the ``configure`` method. ``no_expiry_keys`` should be supported by
+all OpenStack-specific dogpile backends (memcached) that have the ability to set an explicit TTL:
+
+.. code-block:: python
+
+    kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached',
+                    memcached_expire_time=86400, no_expiry_keys=['key', 'second_key', ...])
+
+
+.. NOTE::
+    For the non-expiring keys functionality to work, the backend must support the ability for
+    the region to set the key_mangler on it and have the attribute ``raw_no_expiry_keys``.
+    In most cases, support for setting the key_mangler on the backend is handled by allowing
+    the region object to set the ``key_mangler`` attribute on the backend.
+
+    The ``raw_no_expiry_keys`` attribute is expected to be used to hold the values of the
+    keyword argument ``no_expiry_keys`` prior to hashing. It is the responsibility of the
+    backend to use these raw values to determine if a key should be exempt from expiring
+    and not set the TTL on the non-expiring keys when the ``set`` or ``set_multi`` methods are
+    called.
+
+    Typically the key will be hashed by the region using its key_mangler method
+    before being passed to the backend to set the value in the KeyValueStore. This
+    means that in most cases, the backend will need to either pre-compute the hashed versions
+    of the keys (when the key_mangler is set) and store a cached copy, or hash each item in
+    the ``raw_no_expiry_keys`` attribute on each call to ``.set()`` and ``.set_multi()``. The
+    ``memcached`` backend handles this hashing and caching of the keys by utilizing an
+    ``@property`` method for the ``.key_mangler`` attribute on the backend and utilizing the
+    associated property setter method to front-load the hashing work at attribute set time.
+
+Once a KVS object has been instantiated the method of interacting is the same as most memcache
+implementations:
+
+.. code-block:: python
+
+    kvs_store = kvs.get_key_value_store('TestKVSRegion')
+    kvs_store.configure(...)
+    # Set a Value
+    kvs_store.set(<Key>, <Value>)
+    # Retrieve a value:
+    retrieved_value = kvs_store.get(<key>)
+    # Delete a key/value pair:
+    kvs_store.delete(<key>)
+    # multi-get:
+    kvs_store.get_multi([<key>, <key>, ...])
+    # multi-set:
+    kvs_store.set_multi(dict(<key>=<value>, <key>=<value>, ...))
+    # multi-delete
+    kvs_store.delete_multi([<key>, <key>, ...])
+
+
+There is a global configuration option to be aware of (that can be set in the ``[kvs]`` section of
+the Keystone configuration file): ``enable_key_mangler`` can be set to false, disabling the use of
+key_manglers (modification of the key when saving to the backend to help prevent
+collisions or exceeding key size limits with memcached).
+
+.. NOTE::
+    The ``enable_key_mangler`` option in the ``[kvs]`` section of the Keystone configuration file
+    is not the same option as the one in the ``[cache]`` section of the configuration file
+    (and does not affect the cache-layer key manglers). Similarly, the ``[cache]`` section options
+    relating to key manglers have no bearing on the ``[kvs]`` objects.
+
+.. WARNING::
+    Setting the ``enable_key_mangler`` option to False can have detrimental effects on the
+    KeyValueStore backend. It is recommended that this value not be set to False except
+    when debugging issues with the ``dogpile.cache`` backend itself.
+
+Any backends that are to be used with the ``KeyValueStore`` system need to be registered with
+dogpile. For in-tree/provided backends, the registration should occur in
+``keystone/common/kvs/__init__.py``. For backends that are developed out of tree, the location
+should be added to the ``backends`` option in the ``[kvs]`` section of the Keystone configuration::
+
+    [kvs]
+    backends = backend_module1.backend_class1,backend_module2.backend_class2
+
+All registered backends will receive the "short name" of "openstack.kvs.<class name>" for use in the
+``configure`` method on the ``KeyValueStore`` object.  The ``<class name>`` of a backend must be
+globally unique.
+
+dogpile.cache based MongoDB (NoSQL) backend
+--------------------------------------------
+
+The ``dogpile.cache`` based MongoDB backend implementation allows for various MongoDB
+configurations, e.g., standalone, a replica set, sharded replicas, with or without SSL,
+use of TTL type collections, etc.
+
+Example of typical configuration for MongoDB backend:
+
+.. code-block:: python
+
+    from dogpile.cache import region
+
+    arguments = {
+        'db_hosts': 'localhost:27017',
+        'db_name': 'ks_cache',
+        'cache_collection': 'cache',
+        'username': 'test_user',
+        'password': 'test_password',
+
+        # optional arguments
+        'son_manipulator': 'my_son_manipulator_impl'
+    }
+
+    region.make_region().configure('keystone.cache.mongo',
+                                   arguments=arguments)
+
+The optional ``son_manipulator`` is used to manipulate custom data types while they
+are saved in or retrieved from MongoDB. If the dogpile cached values contain built-in
+data types and no custom classes, then the provided implementation class is sufficient.
+For further details, refer to
+http://api.mongodb.org/python/current/examples/custom_type.html#automatic-encoding-and-decoding
+
+Similar to other backends, this backend can be added via Keystone configuration in
+``keystone.conf``::
+
+    [cache]
+    # Global cache functionality toggle.
+    enabled = True
+
+    # Referring to specific cache backend
+    backend = keystone.cache.mongo
+
+    # Backend specific configuration arguments
+    backend_argument = db_hosts:localhost:27017
+    backend_argument = db_name:ks_cache
+    backend_argument = cache_collection:cache
+    backend_argument = username:test_user
+    backend_argument = password:test_password
+
+This backend is registered in ``keystone.common.cache.core`` module. So, its usage
+is similar to other dogpile caching backends as it implements the same dogpile APIs.
+
+
+Building the Documentation
+--------------------------
+
+The documentation is generated with Sphinx using the tox command.  To create HTML docs and man pages:
+
+.. code-block:: bash
+
+    $ tox -e docs
+
+The results are in the docs/build/html and docs/build/man directories respectively.
diff --git a/keystone-moon/doc/source/event_notifications.rst b/keystone-moon/doc/source/event_notifications.rst
new file mode 100644 (file)
index 0000000..740986b
--- /dev/null
@@ -0,0 +1,416 @@
+
+..
+      Copyright 2013 IBM Corp.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+============================
+Keystone Event Notifications
+============================
+
+Keystone provides notifications about usage data so that 3rd party applications
+can use the data for billing, monitoring, or quota purposes. This document
+describes the current inclusions and exclusions for Keystone notifications.
+
+Keystone currently supports two notification formats: a Basic Notification,
+and a Cloud Auditing Data Federation (`CADF`_) Notification.
+The supported operations between the two types of notification formats are
+documented below.
+
+Common Notification Structure
+==============================
+
+Notifications generated by Keystone are generated in JSON format. An external
+application can format them into ATOM format and publish them as a feed.
+Currently, all notifications are immediate, meaning they are generated when a
+specific event happens. Notifications all adhere to a specific top level
+format:
+
+.. code-block:: javascript
+
+    {
+        "event_type": "identity.<resource_type>.<operation>",
+        "message_id": "<message_id>",
+        "payload": {},
+        "priority": "INFO",
+        "publisher_id": "identity.<hostname>",
+        "timestamp": "<timestamp>"
+    }
+
+Where ``<resource_type>`` is a Keystone resource, such as user or project, and
+``<operation>`` is a Keystone operation, such as created or deleted.
+
+The key differences between the two notification formats (Basic and CADF) lie
+within the ``payload`` portion of the notification.
+
+The ``priority`` of the notification being sent is not configurable through
+the Keystone configuration file. This value defaults to ``INFO`` for all
+notifications sent by Keystone.
+
+Basic Notifications
+===================
+
+All basic notifications contain a limited amount of information, specifically,
+just the resource type, operation, and resource id.
+
+The ``payload`` portion of a Basic Notification is a single key-value pair.
+
+.. code-block:: javascript
+
+    {
+        "resource_info": <resource_id>
+    }
+
+Where ``<resource_id>`` is the unique identifier assigned to the
+``resource_type`` that is undergoing the ``<operation>``.
+
+Supported Events
+----------------
+
+The following table displays the compatibility between resource types and
+operations.
+
+========================  =================================
+resource type             supported operations
+========================  =================================
+group                     create, update, delete
+project                   create, update, delete
+role                      create, update, delete
+domain                    create, update, delete
+user                      create, update, delete
+trust                     create, delete
+region                    create, update, delete
+endpoint                  create, update, delete
+service                   create, update, delete
+policy                    create, update, delete
+========================  =================================
+
+Note that ``trusts`` are an immutable resource; they do not support ``update``
+operations.
+
+Example Notification
+--------------------
+
+This is an example of a notification sent for a newly created user:
+
+.. code-block:: javascript
+
+    {
+        "event_type": "identity.user.created",
+        "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69",
+        "payload": {
+            "resource_info": "671da331c47d4e29bb6ea1d270154ec3"
+        },
+        "priority": "INFO",
+        "publisher_id": "identity.host1234",
+        "timestamp": "2013-08-29 19:03:45.960280"
+    }
+
+If the operation fails, the notification won't be sent, and no special error
+notification will be sent. Information about the error is handled through
+normal exception paths.
+
+Auditing with CADF
+==================
+
+Keystone uses the `PyCADF`_ library to emit CADF notifications; these events
+adhere to the DMTF `CADF`_ specification. This standard provides auditing
+capabilities for compliance with security, operational, and business processes
+and supports normalized and categorized event data for federation and
+aggregation.
+
+.. _PyCADF: http://docs.openstack.org/developer/pycadf
+.. _CADF: http://www.dmtf.org/standards/cadf
+
+CADF notifications include additional context data around the ``resource``,
+the ``action`` and the ``initiator``.
+
+CADF notifications may be emitted by changing the ``notification_format`` to
+``cadf`` in the configuration file.
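+
+A minimal sketch, assuming the option is read from the ``[DEFAULT]`` section
+of ``keystone.conf``:
+
+.. code-block:: ini
+
+    [DEFAULT]
+    notification_format = cadf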
+
+The ``payload`` portion of a CADF Notification is a CADF ``event``, which
+is represented as a JSON dictionary. For example:
+
+.. code-block:: javascript
+
+    {
+        "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
+        "initiator": {
+            "typeURI": "service/security/account/user",
+            "host": {
+                "agent": "curl/7.22.0(x86_64-pc-linux-gnu)",
+                "address": "127.0.0.1"
+            },
+            "id": "<initiator_id>"
+        },
+        "target": {
+            "typeURI": "<target_uri>",
+            "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
+        },
+        "observer": {
+            "typeURI": "service/security",
+            "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
+        },
+        "eventType": "activity",
+        "eventTime": "2014-02-14T01:20:47.932842+00:00",
+        "action": "<action>",
+        "outcome": "success",
+        "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f",
+    }
+
+Where the following are defined:
+
+* ``<initiator_id>``: ID of the user that performed the operation
+* ``<target_uri>``: CADF-specific target URI (e.g. ``data/security/project``)
+* ``<action>``: The action being performed, typically
+  ``<operation>.<resource_type>``
+
+Additionally there may be extra keys present depending on the operation being
+performed, these will be discussed below.
+
+Note that the ``eventType`` property of the CADF payload is different from the
+``event_type`` property of a notification. The former (``eventType``) is a
+CADF keyword that designates the type of event being measured, which
+can be: `activity`, `monitor` or `control`. The latter
+(``event_type``) is described in previous sections as
+`identity.<resource_type>.<operation>`.
+
+Supported Events
+----------------
+
+The following table displays the compatibility between resource types and
+operations.
+
+======================  =============================  =============================
+resource type           supported operations           typeURI
+======================  =============================  =============================
+group                   create, update, delete         data/security/group
+project                 create, update, delete         data/security/project
+role                    create, update, delete         data/security/role
+domain                  create, update, delete         data/security/domain
+user                    create, update, delete         data/security/account/user
+trust                   create, delete                 data/security/trust
+region                  create, update, delete         data/security/region
+endpoint                create, update, delete         data/security/endpoint
+service                 create, update, delete         data/security/service
+policy                  create, update, delete         data/security/policy
+role assignment         add, remove                    data/security/account/user
+None                    authenticate                   data/security/account/user
+======================  =============================  =============================
+
+Example Notification - Project Create
+-------------------------------------
+
+The following is an example of a notification that is sent when a project is
+created. This example can be applied to any ``create``, ``update`` or
+``delete`` event that is seen in the table above; the ``<action>`` and
+``typeURI`` fields will change accordingly.
+
+The difference to note is the inclusion of the ``resource_info`` field, which
+contains the ``<resource_id>`` that is undergoing the operation, thus creating
+a common element between the CADF and Basic notification formats.
+
+.. code-block:: javascript
+
+    {
+        "event_type": "identity.project.created",
+        "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69",
+        "payload": {
+            "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
+            "initiator": {
+                "typeURI": "service/security/account/user",
+                "host": {
+                    "agent": "curl/7.22.0(x86_64-pc-linux-gnu)",
+                    "address": "127.0.0.1"
+                },
+                "id": "c9f76d3c31e142af9291de2935bde98a"
+            },
+            "target": {
+                "typeURI": "data/security/project",
+                "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
+            },
+            "observer": {
+                "typeURI": "service/security",
+                "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
+            },
+            "eventType": "activity",
+            "eventTime": "2014-02-14T01:20:47.932842+00:00",
+            "action": "created.project",
+            "outcome": "success",
+            "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f",
+            "resource_info": "671da331c47d4e29bb6ea1d270154ec3"
+        },
+        "priority": "INFO",
+        "publisher_id": "identity.host1234",
+        "timestamp": "2013-08-29 19:03:45.960280"
+    }
+
+Example Notification - Authentication
+-------------------------------------
+
+The following is an example of a notification that is sent when a user
+authenticates with Keystone.
+
+Note that this notification is emitted both when a user successfully
+authenticates and when a user fails to authenticate.
+
+.. code-block:: javascript
+
+    {
+        "event_type": "identity.authenticate",
+        "message_id": "1371a590-d5fd-448f-b3bb-a14dead6f4cb",
+        "payload": {
+            "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
+            "initiator": {
+                "typeURI": "service/security/account/user",
+                "host": {
+                    "agent": "curl/7.22.0(x86_64-pc-linux-gnu)",
+                    "address": "127.0.0.1"
+                },
+                "id": "c9f76d3c31e142af9291de2935bde98a"
+            },
+            "target": {
+                "typeURI": "service/security/account/user",
+                "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
+            },
+            "observer": {
+                "typeURI": "service/security",
+                "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
+            },
+            "eventType": "activity",
+            "eventTime": "2014-02-14T01:20:47.932842+00:00",
+            "action": "authenticate",
+            "outcome": "success",
+            "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f"
+        },
+        "priority": "INFO",
+        "publisher_id": "identity.host1234",
+        "timestamp": "2014-02-14T01:20:47.932842"
+    }
+
+Example Notification - Federated Authentication
+-----------------------------------------------
+
+The following is an example of a notification that is sent when a user
+authenticates with Keystone via Federation.
+
+This example is similar to the one above; however, the ``initiator`` portion
+of the ``payload`` contains a new ``credential`` section.
+
+.. code-block:: javascript
+
+    {
+        "event_type": "identity.authenticate",
+        "message_id": "1371a590-d5fd-448f-b3bb-a14dead6f4cb",
+        "payload": {
+            "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
+            "initiator": {
+                "credential": {
+                    "type": "http://docs.oasis-open.org/security/saml/v2.0",
+                    "token": "671da331c47d4e29bb6ea1d270154ec3",
+                    "identity_provider": "ACME",
+                    "user": "c9f76d3c31e142af9291de2935bde98a",
+                    "groups": [
+                        "developers"
+                    ]
+                },
+                "typeURI": "service/security/account/user",
+                "host": {
+                    "agent": "curl/7.22.0(x86_64-pc-linux-gnu)",
+                    "address": "127.0.0.1"
+                },
+                "id": "c9f76d3c31e142af9291de2935bde98a"
+            },
+            "target": {
+                "typeURI": "service/security/account/user",
+                "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
+            },
+            "observer": {
+                "typeURI": "service/security",
+                "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
+            },
+            "eventType": "activity",
+            "eventTime": "2014-02-14T01:20:47.932842+00:00",
+            "action": "authenticate",
+            "outcome": "success",
+            "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f"
+        },
+        "priority": "INFO",
+        "publisher_id": "identity.host1234",
+        "timestamp": "2014-02-14T01:20:47.932842"
+    }
+
+Example Notification - Role Assignment
+--------------------------------------
+
+The following is an example of a notification that is sent when a role is
+granted to, or revoked from, a user or group on a project or domain.
+
+It is important to note that this type of notification carries several new
+keys in the ``payload``: expect ``role``, ``inherited_to_projects``,
+``project`` or ``domain``, and ``user`` or ``group``. With the exception of
+``inherited_to_projects``, each holds the unique identifier of the
+corresponding resource.
+
+.. code-block:: javascript
+
+    {
+        "event_type": "identity.created.role_assignment",
+        "message_id": "a5901371-d5fd-b3bb-448f-a14dead6f4cb",
+        "payload": {
+            "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
+            "initiator": {
+                "typeURI": "service/security/account/user",
+                "host": {
+                    "agent": "curl/7.22.0(x86_64-pc-linux-gnu)",
+                    "address": "127.0.0.1"
+                },
+                "id": "c9f76d3c31e142af9291de2935bde98a"
+            },
+            "target": {
+                "typeURI": "service/security/account/user",
+                "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15"
+            },
+            "observer": {
+                "typeURI": "service/security",
+                "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a"
+            },
+            "eventType": "activity",
+            "eventTime": "2014-08-20T01:20:47.932842+00:00",
+            "role": "0e6b990380154a2599ce6b6e91548a68",
+            "project": "24bdcff1aab8474895dbaac509793de1",
+            "inherited_to_projects": false,
+            "group": "c1e22dc67cbd469ea0e33bf428fe597a",
+            "action": "created.role_assignment",
+            "outcome": "success",
+            "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f"
+        },
+        "priority": "INFO",
+        "publisher_id": "identity.host1234",
+        "timestamp": "2014-08-20T01:20:47.932842"
+    }
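+
+As a minimal, hypothetical sketch (not part of Keystone), a consumer could
+flatten such a payload into a readable summary; ``payload`` is assumed to be
+the dictionary shown above:
+
+.. code-block:: python
+
+    def summarize_role_assignment(payload):
+        actor = payload.get('user') or payload.get('group')
+        target = payload.get('project') or payload.get('domain')
+        return '%s: role %s for %s on %s (inherited=%s)' % (
+            payload['action'], payload['role'], actor, target,
+            payload.get('inherited_to_projects', False))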
+
+Recommendations for consumers
+=============================
+
+One of the most important notifications that Keystone emits is for project
+deletions (``event_type`` = ``identity.project.deleted``). This event should
+indicate to the rest of OpenStack that all resources (such as virtual machines)
+associated with the project should be deleted.
+
+Projects can also have update events (``event_type`` =
+``identity.project.updated``), wherein the project has been disabled. Keystone
+ensures this has an immediate impact on the accessibility of the project's
+resources by revoking tokens with authorization on the project, but it should
+**not** have a direct impact on the project's resources (in other words,
+virtual machines should **not** be deleted).
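+
+A minimal dispatch sketch for a consumer, assuming the notification has
+already been received and deserialized into a dictionary
+(``delete_project_resources`` and ``revalidate_project`` are hypothetical
+helpers):
+
+.. code-block:: python
+
+    def handle_identity_notification(notification):
+        event_type = notification['event_type']
+        payload = notification['payload']
+        if event_type == 'identity.project.deleted':
+            # Remove all resources owned by the deleted project.
+            delete_project_resources(payload['resource_info'])
+        elif event_type == 'identity.project.updated':
+            # The project may have been disabled; re-check it, but do
+            # not delete its resources.
+            revalidate_project(payload['resource_info'])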
diff --git a/keystone-moon/doc/source/extension_development.rst b/keystone-moon/doc/source/extension_development.rst
new file mode 100644 (file)
index 0000000..a024849
--- /dev/null
@@ -0,0 +1,303 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=====================================
+Keystone Extensions Development Guide
+=====================================
+
+General
+=======
+
+This Extension Development Guide provides some mock code, located in the
+``keystone/contrib/example`` folder, to use as an Extension code base.
+
+- All Extensions must be created in the ``keystone/contrib`` folder.
+- The new Extension code must be contained in a new folder under ``contrib``.
+- Whenever possible an Extension should adhere to the following directory
+  structure convention::
+
+      keystone/contrib/
+      └── my_extension
+          ├── backends (optional)
+          │   ├── __init__.py (mandatory)
+          │   └── sql.py (optional)
+          │   └── kvs.py (optional)
+          ├── migrate_repo (optional)
+          │   ├── __init__.py (mandatory)
+          │   ├── migrate.cfg (mandatory)
+          │   └── versions (mandatory)
+          │       ├── 001_create_tables.py (mandatory)
+          │       └── __init__.py (mandatory)
+          ├── __init__.py (mandatory)
+          ├── core.py (mandatory)
+          ├── controllers.py (mandatory for API Extension)
+          └── routers.py (mandatory for API Extension)
+
+- If the Extension implements an API Extension the ``controllers.py`` and
+  ``routers.py`` must be present and correctly handle the API Extension
+  requests and responses.
+- If the Extension implements backends a ``backends`` folder should exist.
+  Backends are defined to store data persistently and can use a variety of
+  technologies. Please see the Backends section in this document for more info.
+- If the Extension adds data structures, then a ``migrate_repo`` folder should
+  exist.
+- If configuration changes are required/introduced in the
+  ``keystone.conf.sample`` file, these should be kept disabled by default and
+  have their own section.
+- If configuration changes are required/introduced in the
+  ``keystone-paste.ini``, the new filter must be declared.
+- The module may register to listen to events by declaring the corresponding
+  callbacks in the ``core.py`` file.
+- The new extension should be disabled by default (it should not affect the
+  default application pipelines).
+
+Modifying the ``keystone.conf.sample`` File
+===========================================
+
+If an Extension needs to change the ``keystone.conf.sample`` file, it
+must follow the config file conventions and introduce a dedicated section.
+
+Example::
+
+    [example]
+    driver = keystone.contrib.example.backends.sql.mySQLClass
+
+    [my_other_extension]
+    extension_flag = False
+
+The Extension parameters should be commented out since, by default,
+extensions are disabled.
+
+Example::
+
+    [example]
+    #driver = keystone.contrib.example.backends.sql.mySQLClass
+
+    [my_other_extension]
+    #extension_flag = False
+
+In case the Extension overrides or re-implements an existing portion of
+Keystone, the required change should be documented in ``configuration.rst``
+but not placed in the ``keystone.conf.sample`` file, to avoid unnecessary
+confusion.
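+
+As an illustrative sketch (assuming the extension registers its options with
+``oslo.config``, as core Keystone does), the ``[example]`` options above
+could be declared like this:
+
+.. code-block:: python
+
+    from oslo_config import cfg
+
+    example_opts = [
+        cfg.StrOpt('driver',
+                   default='keystone.contrib.example.backends.sql.mySQLClass',
+                   help='Backend driver for the example extension.'),
+    ]
+    cfg.CONF.register_opts(example_opts, group='example')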
+
+Modifying the ``keystone-paste.ini`` File
+=========================================
+
+In the case an Extension augments a pipeline by introducing a new ``filter``
+and/or APIs in the ``OS`` namespace, a corresponding ``filter:`` section must
+be introduced in the ``keystone-paste.ini`` file. The Extension should
+declare the filter factory constructor in the ``ini`` file.
+
+Example::
+
+    [filter:example]
+    paste.filter_factory = keystone.contrib.example.routers:ExampleRouter.factory
+
+The ``filter`` must not be placed in the ``pipeline`` by default; it must be
+treated as optional. How to add the extension to the pipeline should be
+specified in detail in the ``configuration.rst`` file.
+
+Package Constructor File
+========================
+
+The ``__init__.py`` file represents the package constructor. The Extension
+needs to import what is necessary from the ``core.py`` module.
+
+Example:
+
+.. code-block:: python
+
+   from keystone.contrib.example.core import *
+
+Core
+====
+
+The ``core.py`` file represents the main module defining the data structure
+and interface. In the Model-View-Controller (MVC) pattern it represents the
+``Model`` part, and it delegates the data layer implementation to the
+``Backends``.
+
+If the ``core.py`` file contains a ``Manager`` and a ``Driver``, it must
+provide the dependency injections for the ``Controllers`` and/or other
+modules using the ``Manager``. A good practice is to call the dependency
+``extension_name_api``.
+
+Example:
+
+.. code-block:: python
+
+    @dependency.provider('example_api')
+    class Manager(manager.Manager):
+        pass
+
+Routers
+=======
+
+``routers.py`` has the objective of routing HTTP requests and directing them
+to the correct methods within the ``Controllers``. Extension routers extend
+``wsgi.ExtensionRouter``.
+
+Example:
+
+.. code-block:: python
+
+    from keystone.common import wsgi
+    from keystone.contrib.example import controllers
+
+
+    class ExampleRouter(wsgi.ExtensionRouter):
+
+        PATH_PREFIX = '/OS-EXAMPLE'
+
+        def add_routes(self, mapper):
+            example_controller = controllers.ExampleV3Controller()
+            mapper.connect(self.PATH_PREFIX + '/example',
+                           controller=example_controller,
+                           action='do_something',
+                           conditions=dict(method=['GET']))
+
+Controllers
+===========
+
+``controllers.py`` has the objective of handling requests and implementing
+the Extension logic. Controllers are consumers of the ``Manager`` API and
+must declare all the required dependency injections. ``Controllers`` extend
+the ``V3Controller`` class.
+
+Example:
+
+.. code-block:: python
+
+    @dependency.requires('identity_api', 'example_api')
+    class ExampleV3Controller(controller.V3Controller):
+        pass
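+
+Expanding on this, a non-authoritative sketch of the ``do_something`` action
+referenced by the router above might look as follows (the ``do_something``
+call on the Manager and the returned structure are purely illustrative):
+
+.. code-block:: python
+
+    @dependency.requires('identity_api', 'example_api')
+    class ExampleV3Controller(controller.V3Controller):
+
+        def do_something(self, context):
+            # Delegate the real work to the Manager injected as example_api.
+            result = self.example_api.do_something()
+            return {'example': result}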
+
+Backends
+========
+
+The ``backends`` folder provides the model implementations for the different
+backends supported by the Extension. See the General section above for an
+example directory structure.
+
+If a SQL backend is provided, in the ``sql.py`` backend implementation it is
+mandatory to define the new table(s) that the Extension introduces and the
+attributes they are composed of.
+
+For more information on backends, refer to the `Keystone Architecture
+<http://docs.openstack.org/developer/keystone/architecture.html>`_
+documentation.
+
+Example:
+
+.. code-block:: python
+
+    class ExampleSQLBackend(sql.ModelBase, sql.DictBase):
+        """example table description."""
+        __tablename__ = 'example_table'
+        attributes = ['id', 'type', 'extra']
+
+        example_id = sql.Column(sql.String(64),
+                                primary_key=True,
+                                nullable=False)
+        ...
+
+SQL Migration Repository
+========================
+
+In case the Extension adds SQL data structures, these must be stored in
+separate tables and must not be included in the ``migrate_repo`` of core
+Keystone. Please refer to the ``migrate.cfg`` file to configure the Extension
+repository.
+
+In order to create the Extension tables and their attributes, a ``db_sync``
+command must be executed.
+
+Example:
+
+.. code-block:: bash
+
+     $ ./bin/keystone-manage db_sync --extension example
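+
+For orientation, a minimal sketch of what ``versions/001_create_tables.py``
+could contain, following the sqlalchemy-migrate conventions Keystone used at
+the time (the table and column definitions are illustrative only):
+
+.. code-block:: python
+
+    import sqlalchemy as sql
+
+
+    def upgrade(migrate_engine):
+        meta = sql.MetaData()
+        meta.bind = migrate_engine
+        example_table = sql.Table(
+            'example_table', meta,
+            sql.Column('id', sql.String(64), primary_key=True,
+                       nullable=False),
+            sql.Column('type', sql.String(255)),
+            sql.Column('extra', sql.Text()))
+        example_table.create(migrate_engine, checkfirst=True)
+
+
+    def downgrade(migrate_engine):
+        meta = sql.MetaData()
+        meta.bind = migrate_engine
+        sql.Table('example_table', meta, autoload=True).drop()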
+
+Event Callbacks
+===============
+
+Extensions may provide callbacks to Keystone (Identity) events.
+Extensions must provide the list of events of interest and the corresponding
+callbacks. Events are issued upon successful creation, modification, and
+deletion of the following Keystone resources:
+
+- ``group``
+- ``project``
+- ``role``
+- ``user``
+
+The extension's ``Manager`` class must contain an ``event_callbacks``
+attribute: a dictionary whose keys are the events of interest and whose
+values are the respective callbacks. Event callback registration is done via
+the dependency injection mechanism: during dependency provider registration,
+the ``dependency.provider`` decorator looks for the ``event_callbacks``
+class attribute and, if it exists, registers the event callbacks
+accordingly. In order to enable event callbacks, the extension's ``Manager``
+class must therefore also be a dependency provider.
+
+Example:
+
+.. code-block:: python
+
+    # Since this is a dependency provider, any code module using this or any
+    # other dependency provider (i.e. using the dependency.provider
+    # decorator) will be enabled for the attribute-based notification.
+
+    @dependency.provider('example_api')
+    class ExampleManager(manager.Manager):
+        """Example Manager.
+
+        See :mod:`keystone.common.manager.Manager` for more details on
+        how this dynamically calls the backend.
+
+        """
+
+        def __init__(self):
+            self.event_callbacks = {
+                # Here we add the event_callbacks class attribute that
+                # calls project_deleted_callback when a project is deleted.
+                'deleted': {
+                    'project': [
+                        self.project_deleted_callback]}}
+            super(ExampleManager, self).__init__(
+                'keystone.contrib.example.core.ExampleDriver')
+
+        def project_deleted_callback(self, service, resource_type, operation,
+                                     payload):
+            # Clean up data related to the deleted project here.
+            pass
+
+A callback must accept the following parameters:
+
+- ``service`` - the service information (e.g. identity)
+- ``resource_type`` - the resource type (e.g. project)
+- ``operation`` - the operation (updated, created, deleted)
+- ``payload`` - the actual payload info of the resource that was acted on
+
+Current callback operations:
+
+- ``created``
+- ``deleted``
+- ``updated``
+
+Example:
+
+.. code-block:: python
+
+      def project_deleted_callback(self, service, resource_type, operation,
+                                   payload):
+          # Act on the deleted project's payload here.
+          pass
diff --git a/keystone-moon/doc/source/extensions.rst b/keystone-moon/doc/source/extensions.rst
new file mode 100644 (file)
index 0000000..f3bade9
--- /dev/null
@@ -0,0 +1,161 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+==========
+Extensions
+==========
+
+Status
+======
+
+An extension may be considered ``stable``, ``experimental`` or ``out-of-tree``.
+
+* A `stable` status indicates that an extension is fully supported by the
+  OpenStack Identity team.
+
+* An `experimental` status indicates that although the intention is to keep
+  the API unchanged, we reserve the right to change it up until the point that
+  it is deemed `stable`.
+
+* An `out-of-tree` status indicates that no formal support will be provided.
+
+Graduation Process
+==================
+
+By default, major new functionality that is proposed to be in-tree will start
+off in `experimental` status. Typically it takes a minimum of one cycle to
+transition from `experimental` to `stable`, although in special cases this
+might happen within a cycle.
+
+Removal Process
+===============
+
+It is not intended that functionality should stay `experimental` for a long
+period; functionality that stays `experimental` for more than **two**
+releases is expected to transition to either `stable` or `out-of-tree`.
+
+Current Extensions
+==================
+
+------------------
+Endpoint Filtering
+------------------
+
+The Endpoint Filtering extension enables creation of ad-hoc catalogs for each
+project-scoped token request.
+
+.. NOTE:: Support status for Endpoint Filtering
+
+   *Experimental* (Icehouse, Juno)
+   *Stable* (Kilo)
+
+.. toctree::
+   :maxdepth: 1
+
+   extensions/endpoint_filter.rst
+
+* `API Specification for Endpoint Filtering <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-ep-filter-ext.html>`__
+
+---------------
+Endpoint Policy
+---------------
+
+The Endpoint Policy extension provides associations between service endpoints
+and policies that are already stored in the Identity server and referenced by
+a policy ID.
+
+.. NOTE:: Support status for Endpoint Policy
+
+   *Experimental* (Juno)
+   *Stable* (Kilo)
+
+.. toctree::
+   :maxdepth: 1
+
+   extensions/endpoint_policy.rst
+
+* `API Specification for Endpoint Policy <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-endpoint-policy.html>`__
+
+----------
+Federation
+----------
+
+The Federation extension provides the ability for users to manage Identity
+Providers (IdPs) and establish a set of rules to map federation protocol
+attributes to Identity API attributes.
+
+.. NOTE:: Support status for Federation
+
+   *Experimental* (Icehouse, Juno)
+   *Stable* (Kilo)
+
+.. toctree::
+   :maxdepth: 1
+
+   extensions/federation.rst
+
+* `API Specification for Federation <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-federation-ext.html>`__
+
+-------
+Inherit
+-------
+
+The Inherit extension provides the ability for projects to inherit role
+assignments from their owning domain, or from projects higher in the
+hierarchy.
+
+.. NOTE:: Support status for Inherit
+
+   *Experimental* (Havana, Icehouse)
+   *Stable* (Juno)
+
+* `API Specification for Inherit <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-inherit-ext.html>`__
+
+----------
+OAuth 1.0a
+----------
+
+The OAuth 1.0a extension provides the ability for Identity users to delegate
+roles to third party consumers via the OAuth 1.0a specification.
+
+.. NOTE:: Support status for OAuth 1.0a
+
+   *Experimental* (Havana, Icehouse)
+   *Stable* (Juno)
+
+.. toctree::
+   :maxdepth: 1
+
+   extensions/oauth1.rst
+
+* `API Specification for OAuth 1.0a <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-oauth1-ext.html>`__
+
+-----------------
+Revocation Events
+-----------------
+
+The Revocation Events extension provides a list of token revocations. Each
+event expresses a set of criteria which describes a set of tokens that are
+no longer valid.
+
+.. NOTE:: Support status for Revocation Events
+
+   *Experimental* (Juno)
+   *Stable* (Kilo)
+
+.. toctree::
+   :maxdepth: 1
+
+   extensions/revoke.rst
+
+* `API Specification for Revocation Events <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-revoke-ext.html>`__
diff --git a/keystone-moon/doc/source/extensions/endpoint_filter.rst b/keystone-moon/doc/source/extensions/endpoint_filter.rst
new file mode 100644 (file)
index 0000000..6619850
--- /dev/null
@@ -0,0 +1,44 @@
+..
+      Copyright 2011-2013 OpenStack, Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+======================================
+Enabling the Endpoint Filter Extension
+======================================
+
+To enable the endpoint filter extension:
+
+1. Add the endpoint filter extension catalog driver to the ``[catalog]`` section
+   in ``keystone.conf``. For example::
+
+    [catalog]
+    driver = keystone.contrib.endpoint_filter.backends.catalog_sql.EndpointFilterCatalog
+
+2. Add the ``endpoint_filter_extension`` filter to the ``api_v3`` pipeline in
+   ``keystone-paste.ini``. This must be added after ``json_body`` and before
+   the last entry in the pipeline. For example::
+
+    [pipeline:api_v3]
+    pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension endpoint_filter_extension service_v3
+
+3. Create the endpoint filter extension tables if using the provided sql backend. For example::
+
+    ./bin/keystone-manage db_sync --extension endpoint_filter
+
+4. Optionally, change ``return_all_endpoints_if_no_filter`` in the
+   ``[endpoint_filter]`` section of ``keystone.conf`` to return an empty
+   catalog if no associations are made. For example::
+
+    [endpoint_filter]
+    return_all_endpoints_if_no_filter = False
diff --git a/keystone-moon/doc/source/extensions/endpoint_policy.rst b/keystone-moon/doc/source/extensions/endpoint_policy.rst
new file mode 100644 (file)
index 0000000..86ff226
--- /dev/null
@@ -0,0 +1,35 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+======================================
+Enabling the Endpoint Policy Extension
+======================================
+
+To enable the endpoint policy extension:
+
+1. Optionally, add the endpoint policy extension driver to the
+   ``[endpoint_policy]`` section in ``keystone.conf``. For example::
+
+    [endpoint_policy]
+    driver = keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy
+
+2. Add the ``endpoint_policy_extension`` policy to the ``api_v3`` pipeline in
+   ``keystone-paste.ini``. This must be added after ``json_body`` and before
+   the last entry in the pipeline. For example::
+
+    [pipeline:api_v3]
+    pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension endpoint_policy_extension service_v3
+
+3. Create the endpoint policy extension tables if using the provided SQL backend. For example::
+
+    ./bin/keystone-manage db_sync --extension endpoint_policy
diff --git a/keystone-moon/doc/source/extensions/federation.rst b/keystone-moon/doc/source/extensions/federation.rst
new file mode 100644 (file)
index 0000000..f1b5baa
--- /dev/null
@@ -0,0 +1,66 @@
+..
+      Copyright 2014 OpenStack, Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+==================================
+Enabling the Federation Extension
+==================================
+
+To enable the federation extension:
+
+1. Add the federation extension driver to the ``[federation]`` section in
+   ``keystone.conf``. For example::
+
+       [federation]
+       driver = keystone.contrib.federation.backends.sql.Federation
+
+2. Add the ``saml2`` and/or ``oidc`` authentication methods to the ``[auth]``
+   section in ``keystone.conf``::
+
+       [auth]
+       methods = external,password,token,saml2,oidc
+       saml2 = keystone.auth.plugins.mapped.Mapped
+       oidc = keystone.auth.plugins.mapped.Mapped
+
+.. NOTE::
+    The ``external`` method should be dropped to avoid any interference with
+    some Apache + Shibboleth SP setups, where a ``REMOTE_USER`` env variable is
+    always set, even as an empty value.
+
+3. Add the ``federation_extension`` middleware to the ``api_v3`` pipeline in
+   ``keystone-paste.ini``. This must be added after ``json_body`` and before
+   the last entry in the pipeline. For example::
+
+       [pipeline:api_v3]
+       pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension federation_extension service_v3
+
+4. Create the federation extension tables if using the provided SQL backend.
+   For example::
+
+       ./bin/keystone-manage db_sync --extension federation
+
+5. As of the Juno release, multiple Keystone deployments can be federated. To
+   do so, the `pysaml2 <https://pypi.python.org/pypi/pysaml2>`_ library is
+   required. Since OS-FEDERATION is an extension, ``pysaml2`` is not
+   installed by default; it must be installed manually. For example::
+
+        pip install --upgrade $(grep pysaml2 test-requirements.txt)
+
+   Also, the `xmlsec1` command line tool is needed to sign the SAML assertions
+   generated by the Keystone Identity Provider:
+
+   .. code-block:: bash
+
+        $ apt-get install xmlsec1
diff --git a/keystone-moon/doc/source/extensions/moon.rst b/keystone-moon/doc/source/extensions/moon.rst
new file mode 100644 (file)
index 0000000..fc86267
--- /dev/null
@@ -0,0 +1,145 @@
+..
+      Copyright 2015 Orange
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+============
+Moon backend
+============
+
+Before doing anything, you must test your installation and check that your
+infrastructure is working. For example, check that you can create new virtual
+machines with the admin and demo logins.
+
+Configuration
+-------------
+
+Moon is a contrib backend, so you have to enable it by modifying
+``/etc/keystone/keystone-paste.ini`` like this:
+
+.. code-block:: ini
+
+    [filter:moon]
+    paste.filter_factory = keystone.contrib.moon.routers:Admin.factory
+
+    ...
+
+    [pipeline:public_api]
+    # The last item in this pipeline must be public_service or an equivalent
+    # application. It cannot be a filter.
+    pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension user_crud_extension moon public_service
+
+    [pipeline:admin_api]
+    # The last item in this pipeline must be admin_service or an equivalent
+    # application. It cannot be a filter.
+    pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension s3_extension crud_extension moon admin_service
+
+    [pipeline:api_v3]
+    # The last item in this pipeline must be service_v3 or an equivalent
+    # application. It cannot be a filter.
+    pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension moon service_v3
+
+    ...
+
+You must modify ``/etc/keystone/keystone.conf`` as needed (see the end of the
+file) and copy the following directories:
+
+.. code-block:: sh
+
+    cp -R /opt/stack/keystone/examples/moon/policies/ /etc/keystone/
+    cp -R /opt/stack/keystone/examples/moon/super_extension/ /etc/keystone/
+
+You can now update the Keystone database, create the directory for logs, and
+restart the Keystone service:
+
+.. code-block:: sh
+
+    cd /opt/stack/keystone
+    ./bin/keystone-manage db_sync --extension moon
+    sudo mkdir /var/log/moon/
+    sudo chown vagrant /var/log/moon/
+    sudo service apache2 restart
+
+You also have to install our version of keystonemiddleware
+(https://github.com/rebirthmonkey/keystonemiddleware):
+
+.. code-block:: sh
+
+    cd
+    git clone https://github.com/rebirthmonkey/keystonemiddleware.git
+    cd keystonemiddleware
+    sudo python setup.py install
+
+At this time, the only method to configure Moon is to use python-moonclient,
+a console-based client:
+
+.. code-block:: sh
+
+    cd
+    git clone https://github.com/rebirthmonkey/moonclient.git
+    cd moonclient
+    sudo python setup.py install
+
+If you afterwards have problems restarting nova-api, try removing the
+python-six package:
+
+.. code-block:: sh
+
+    sudo apt-get remove python-six
+
+
+Nova must be configured to send requests to Keystone; you have to modify
+``/etc/nova/api-paste.ini``:
+
+.. code-block:: ini
+
+    ...
+
+    [composite:openstack_compute_api_v2]
+    use = call:nova.api.auth:pipeline_factory
+    noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
+    noauth2 = compute_req_id faultwrap sizelimit noauth2 ratelimit osapi_compute_app_v2
+    keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext moon ratelimit osapi_compute_app_v2
+    keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext moon osapi_compute_app_v2
+
+    [composite:openstack_compute_api_v21]
+    use = call:nova.api.auth:pipeline_factory_v21
+    noauth = compute_req_id faultwrap sizelimit noauth osapi_compute_app_v21
+    noauth2 = compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
+    keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext moon osapi_compute_app_v21
+
+    [composite:openstack_compute_api_v3]
+    use = call:nova.api.auth:pipeline_factory_v21
+    noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+    noauth2 = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
+    keystone = request_id faultwrap sizelimit authtoken keystonecontext moon osapi_compute_app_v3
+
+    ...
+
+    [filter:moon]
+    paste.filter_factory = keystonemiddleware.authz:filter_factory
+
+If Swift is also installed, you have to configure it in
+``/etc/swift/proxy-server.conf``:
+
+.. code-block:: ini
+
+    ...
+
+    [pipeline:main]
+    pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit crossdomain authtoken keystoneauth tempauth  formpost staticweb container-quotas account-quotas slo dlo proxy-logging moon proxy-server
+
+    ...
+
+    [filter:moon]
+    paste.filter_factory = keystonemiddleware.authz:filter_factory
+
+Nova and Swift must be restarted after that. Depending on your configuration,
+you will have to use 'screen' (if using devstack) or 'service' to restart the
+nova-api and swift-proxy daemons.
+
+Usage
+-----
+
+TODO
\ No newline at end of file
diff --git a/keystone-moon/doc/source/extensions/moon_api.rst b/keystone-moon/doc/source/extensions/moon_api.rst
new file mode 100644 (file)
index 0000000..1f7ad10
--- /dev/null
@@ -0,0 +1,628 @@
+Moon API
+========
+
+Here are the Moon APIs, with some examples of posted data and returned data.
+
+Intra-Extension API
+-------------------
+
+Authz
+~~~~~
+
+* ``GET     /OS-MOON/authz/{tenant_id}/{subject_id}/{object_id}/{action_id}``
+
+.. code-block:: json
+
+               return = {
+                            "authz": "OK/KO/OutOfScope",
+                            "tenant_id": "tenant_id",
+                            "subject_id": "subject_id",
+                            "object_id": "object_id",
+                            "action_id": "action_id"
+                        }
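+
+As a hedged illustration of calling this endpoint with the ``requests``
+library (the Keystone base URL, port and token below are placeholder
+assumptions, not part of the API definition):
+
+.. code-block:: python
+
+    import requests
+
+
+    def check_authz(tenant_id, subject_id, object_id, action_id, token,
+                    base_url='http://localhost:35357'):
+        url = '%s/OS-MOON/authz/%s/%s/%s/%s' % (
+            base_url, tenant_id, subject_id, object_id, action_id)
+        resp = requests.get(url, headers={'X-Auth-Token': token})
+        # The "authz" field is "OK", "KO" or "OutOfScope".
+        return resp.json()['authz']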
+
+Intra_Extension
+~~~~~~~~~~~~~~~
+
+* ``GET     /OS-MOON/authz_policies``
+
+.. code-block:: json
+
+               return = {
+                            "authz_policies": ["policy_name1", "policy_name2"]
+                        }
+
+* ``GET     /OS-MOON/intra_extensions``
+
+.. code-block:: json
+
+               return = {
+                            "intra_extensions": ["ie_uuid1", "ie_uuid2"]
+                        }
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}``
+
+.. code-block:: json
+
+               return = {
+                            "intra_extensions": {
+                                "id": "uuid1",
+                                "description": "",
+                                "tenant": "tenant_uuid",
+                                "model": "",
+                                "genre": "",
+                                "authz": {},
+                                "admin": {}
+                            }
+                        }
+
+* ``POST    /OS-MOON/intra_extensions``
+
+.. code-block:: json
+
+                 post = {
+                            "name" : "",
+                            "policymodel": "",
+                            "description": ""
+                        }
+               return = {
+                            "id": "uuid1",
+                            "description": "",
+                            "tenant": "tenant_uuid",
+                            "model": "",
+                            "genre": "",
+                            "authz": {},
+                            "admin": {}
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/tenant``
+
+.. code-block:: json
+
+               return = {
+                            "tenant": "tenant_id"
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/tenant``
+
+.. code-block:: json
+
+                 post = {
+                            "tenant_id": "tenant_id"
+                        }
+               return = {
+                            "tenant": "tenant_id"
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/tenant/{tenant_id}``
+
+Perimeter
+~~~~~~~~~
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/subjects``
+
+.. code-block:: json
+
+               return = {
+                            "subjects": ["sub_uuid1", "sub_uuid2"]
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/subjects``
+
+.. code-block:: json
+
+                 post = {
+                            "subject_id" : ""
+                        }
+               return = {
+                            "subjects": ["sub_uuid1", "sub_uuid2"]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/subject/{subject_id}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/objects``
+
+.. code-block:: json
+
+               return = {
+                            "objects": ["obj_uuid1", "obj_uuid2"]
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/objects``
+
+.. code-block:: json
+
+                 post = {
+                            "object_id" : ""
+                        }
+               return = {
+                            "objects": ["obj_uuid1", "obj_uuid2"]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/object/{object_id}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/actions``
+
+.. code-block:: json
+
+               return = {
+                            "actions": ["act_uuid1", "act_uuid2"]
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/actions``
+
+.. code-block:: json
+
+                 post = {
+                            "action_id" : ""
+                        }
+               return = {
+                            "actions": ["act_uuid1", "act_uuid2"]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/actions/{action_id}``
+
+Assignment
+~~~~~~~~~~
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/subject_assignments``
+
+.. code-block:: json
+
+               return = {
+                            "subject_assignments": {
+                                "subject_security_level": {
+                                    "user1": ["low"],
+                                    "user2": ["medium"],
+                                    "user3": ["high"]
+                                }
+                            }
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/subject_assignments``
+
+.. code-block:: json
+
+                 post = {
+                            "subject_id" : "",
+                            "subject_category_id" : "",
+                            "subject_category_scope_id" : ""
+                        }
+               return = {
+                            "subject_assignments": {
+                                "subject_security_level": {
+                                    "user1": ["low"],
+                                    "user2": ["medium"],
+                                    "user3": ["high"]
+                                }
+                            }
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/subject_assignments/{subject_category}/{subject_id}/{subject_scope}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/object_assignments``
+
+.. code-block:: json
+
+               return = {
+                            "object_assignments": {
+                                "object_security_level": {
+                                    "vm1": ["low"],
+                                    "vm2": ["medium"],
+                                    "vm3": ["high"]
+                                }
+                            }
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/object_assignments``
+
+.. code-block:: json
+
+                 post = {
+                            "object_id" : "",
+                            "object_category_id" : "",
+                            "object_category_scope_id" : ""
+                        }
+               return = {
+                            "object_assignments": {
+                                "object_security_level": {
+                                    "vm1": ["low"],
+                                    "vm2": ["medium"],
+                                    "vm3": ["high"]
+                                }
+                            }
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/object_assignments/{object_category}/{object_id}/{object_scope}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/action_assignments``
+
+.. code-block:: json
+
+               return = {
+                            "action_assignments": {
+                                "computing_action": {
+                                    "pause": ["vm_admin"],
+                                    "unpause": ["vm_admin"],
+                                    "start": ["vm_admin"],
+                                    "stop": ["vm_admin"]
+                                }
+                            }
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/action_assignments``
+
+.. code-block:: json
+
+                 post = {
+                            "action_id" : "",
+                            "action_category_id" : "",
+                            "action_category_scope_id" : ""
+                        }
+               return = {
+                            "action_assignments": {
+                                "computing_action": {
+                                    "pause": ["vm_admin"],
+                                    "unpause": ["vm_admin"],
+                                    "start": ["vm_admin"],
+                                    "stop": ["vm_admin"]
+                                }
+                            }
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/action_assignments/{action_category}/{action_id}/{action_scope}``
+
+Metadata
+~~~~~~~~
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/subject_categories``
+
+.. code-block:: json
+
+               return = {
+                            "subject_categories": [ "subject_security_level" ]
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/subject_categories``
+
+.. code-block:: json
+
+                 post = {
+                            "subject_category_id" : ""
+                        }
+               return = {
+                            "subject_categories": [ "subject_security_level" ]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/subject_categories/{subject_category_id}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/object_categories``
+
+.. code-block:: json
+
+               return = {
+                            "object_categories": [ "object_security_level" ]
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/object_categories``
+
+.. code-block:: json
+
+                 post = {
+                            "object_category_id" : ""
+                        }
+               return = {
+                            "object_categories": [ "object_security_level" ]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/object_categories/{object_category_id}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/action_categories``
+
+.. code-block:: json
+
+               return = {
+                            "action_categories": [ "computing_action" ]
+                        }
+
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/action_categories``
+
+.. code-block:: json
+
+                 post = {
+                            "action_category_id" : ""
+                        }
+               return = {
+                            "action_categories": [ "computing_action" ]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/action_categories/{action_category_id}``
+
+Scope
+~~~~~
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/subject_category_scope``
+
+.. code-block:: json
+
+               return = {
+                            "subject_security_level": [ "high", "medium", "low" ]
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/subject_category_scope``
+
+.. code-block:: json
+
+                 post = {
+                            "subject_category_id" : "",
+                            "subject_category_scope_id" : ""
+                        }
+               return = {
+                            "subject_security_level": [ "high", "medium", "low" ]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/subject_category_scope/{subject_category}/{subject_scope}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/object_category_scope``
+
+.. code-block:: json
+
+               return = {
+                            "object_security_level": [ "high", "medium", "low" ]
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/object_category_scope``
+
+.. code-block:: json
+
+                 post = {
+                            "object_category_id" : "",
+                            "object_category_scope_id" : ""
+                        }
+               return = {
+                            "object_security_level": [ "high", "medium", "low" ]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/object_category_scope/{object_category}/{object_scope}``
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/action_category_scope``
+
+.. code-block:: json
+
+               return = {
+                            "computing_action": [ "vm_admin", "vm_access" ]
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/action_category_scope``
+
+.. code-block:: json
+
+                 post = {
+                            "action_id" : "",
+                            "action_category_id" : "",
+                            "action_category_scope_id" : ""
+                        }
+               return = {
+                            "computing_action": [ "vm_admin", "vm_access" ]
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/action_category_scope/{action_category}/{action_scope}``
+
+Metarule
+~~~~~~~~
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/aggregation_algorithms``
+
+.. code-block:: json
+
+               return = {
+                            "aggregation_algorithms": [ "and_true_aggregation", "..."]
+                        }
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/aggregation_algorithm``
+
+.. code-block:: json
+
+               return = {
+                            "aggregation_algorithm": "and_true_aggregation"
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/aggregation_algorithm``
+
+.. code-block:: json
+
+                 post = {
+                            "aggregation": "and_true_aggregation"
+                        }
+               return = {
+                            "aggregation_algorithm": "and_true_aggregation"
+                        }
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/sub_meta_rule``
+
+.. code-block:: json
+
+               return = {
+                            "sub_meta_rule": {
+                                "subject_categories": ["role"],
+                                "action_categories": ["ie_action"],
+                                "object_categories": ["id"],
+                                "relation": "relation_super"
+                            }
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/sub_meta_rule``
+
+.. code-block:: json
+
+                 post = {
+                            "relation_super": {
+                                "subject_categories": ["role"],
+                                "action_categories": ["ie_action"],
+                                "object_categories": ["id"],
+                            }
+                        }
+               return = {
+                            "sub_meta_rule": {
+                                "subject_categories": ["role"],
+                                "action_categories": ["ie_action"],
+                                "object_categories": ["id"],
+                                "relation": "relation_super"
+                            }
+                        }
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/sub_meta_rule_relations``
+
+.. code-block:: json
+
+               return = {
+                            "sub_meta_rule_relations": ["relation_super", ]
+                        }
+
+Rules
+~~~~~
+
+* ``GET     /OS-MOON/intra_extensions/{intra_extensions_id}/sub_rules``
+
+.. code-block:: json
+
+               return = {
+                            "sub_rules": {
+                                  "relation_super": [
+                                      ["high", "vm_admin", "medium"],
+                                      ["high", "vm_admin", "low"],
+                                      ["medium", "vm_admin", "low"],
+                                      ["high", "vm_access", "high"],
+                                      ["high", "vm_access", "medium"],
+                                      ["high", "vm_access", "low"],
+                                      ["medium", "vm_access", "medium"],
+                                      ["medium", "vm_access", "low"],
+                                      ["low", "vm_access", "low"]
+                                  ]
+                            }
+                        }
+
+* ``POST    /OS-MOON/intra_extensions/{intra_extensions_id}/sub_rules``
+
+.. code-block:: json
+
+                 post = {
+                            "rules": ["admin", "vm_admin", "servers"],
+                            "relation": "relation_super"
+                        }
+
+* ``DELETE  /OS-MOON/intra_extensions/{intra_extensions_id}/sub_rules/{relation_name}/{rule}``
+
+
+Tenant mapping API
+------------------
+
+* ``GET  /OS-MOON/tenants``
+
+.. code-block:: json
+
+               return = {
+                            "tenant": {
+                                "uuid1": {
+                                    "name": "tenant1",
+                                    "authz": "intra_extension_uuid1",
+                                    "admin": "intra_extension_uuid2"
+                                },
+                                "uuid2": {
+                                    "name": "tenant2",
+                                    "authz": "intra_extension_uuid1",
+                                    "admin": "intra_extension_uuid2"
+                                }
+                            }
+                        }
+
+* ``GET  /OS-MOON/tenant/{tenant_uuid}``
+
+.. code-block:: json
+
+               return = {
+                            "tenant": {
+                                "uuid": {
+                                    "name": "tenant1",
+                                    "authz": "intra_extension_uuid1",
+                                    "admin": "intra_extension_uuid2"
+                                }
+                            }
+                        }
+
+* ``POST  /OS-MOON/tenant``
+
+.. code-block:: json
+
+                 post = {
+                            "id": "uuid",
+                            "name": "tenant1",
+                            "authz": "intra_extension_uuid1",
+                            "admin": "intra_extension_uuid2"
+                        }
+               return = {
+                            "tenant": {
+                                "uuid": {
+                                    "name": "tenant1",
+                                    "authz": "intra_extension_uuid1",
+                                    "admin": "intra_extension_uuid2"
+                                }
+                            }
+                        }
+
+* ``DELETE  /OS-MOON/tenant/{tenant_uuid}/{intra_extension_uuid}``
+
+.. code-block:: json
+
+               return = {}
+
+Logs API
+--------
+
+* ``GET  /OS-MOON/logs``
+
+InterExtension API
+------------------
+
+* ``GET     /OS-MOON/inter_extensions``
+
+.. code-block:: json
+
+               return = {
+                            "inter_extensions": ["ie_uuid1", "ie_uuid2"]
+                        }
+
+* ``GET     /OS-MOON/inter_extensions/{inter_extensions_id}``
+
+.. code-block:: json
+
+               return = {
+                            "inter_extensions": {
+                                "id": "uuid1",
+                                "description": "",
+                                "requesting_intra_extension_uuid": "uuid1",
+                                "requested_intra_extension_uuid": "uuid2",
+                                "genre": "trust_OR_coordinate",
+                                "virtual_entity_uuid": "ve_uuid1"
+                            }
+                        }
+
+* ``POST    /OS-MOON/inter_extensions``
+
+.. code-block:: json
+
+                 post = {
+                            "description": "",
+                            "requesting_intra_extension_uuid": uuid1,
+                            "requested_intra_extension_uuid": uuid2,
+                            "genre": "trust_OR_coordinate",
+                            "virtual_entity_uuid": "ve_uuid1"
+                        }
+               return = {
+                            "id": "uuid1",
+                            "description": "",
+                            "requesting_intra_extension_uuid": "uuid1",
+                            "requested_intra_extension_uuid": "uuid2",
+                            "genre": "trust_OR_coordinate",
+                            "virtual_entity_uuid": "ve_uuid1"
+                        }
+
+* ``DELETE  /OS-MOON/inter_extensions/{inter_extensions_id}``
+
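+As a usage sketch (not part of the API definition above), these endpoints can
+be exercised with curl. The host, admin port, token variable, and the ``/v3``
+mount point below are assumptions about the deployment:
+
+.. code-block:: bash
+
+    # List the tenant mappings and the registered InterExtensions
+    $ curl -s -H "X-Auth-Token: $OS_TOKEN" \
+        http://localhost:35357/v3/OS-MOON/tenants
+    $ curl -s -H "X-Auth-Token: $OS_TOKEN" \
+        http://localhost:35357/v3/OS-MOON/inter_extensions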
diff --git a/keystone-moon/doc/source/extensions/oauth1.rst b/keystone-moon/doc/source/extensions/oauth1.rst
new file mode 100644 (file)
index 0000000..c89ee12
--- /dev/null
@@ -0,0 +1,50 @@
+..
+      Copyright 2011-2013 OpenStack, Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=============================
+Enabling the OAuth1 Extension
+=============================
+
+To enable the OAuth1 extension:
+
+1. Optionally, add the oauth1 extension driver to the ``[oauth1]`` section in ``keystone.conf``. For example::
+
+    [oauth1]
+    driver = keystone.contrib.oauth1.backends.sql.OAuth1
+
+2. Add the ``oauth1`` authentication method to the ``[auth]`` section in ``keystone.conf``::
+
+    [auth]
+    methods = external,password,token,oauth1
+    oauth1 = keystone.auth.plugins.oauth1.OAuth
+
+3. Add the ``oauth1_extension`` filter to the ``api_v3`` pipeline in
+   ``keystone-paste.ini``. This must be added after ``json_body`` and before
+   the last entry in the pipeline. For example::
+
+    [pipeline:api_v3]
+    pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension oauth1_extension service_v3
+
+4. Create the OAuth1 extension tables if using the provided SQL backend. For example::
+
+    ./bin/keystone-manage db_sync --extension oauth1
+
+5. Optionally, if deploying under an HTTPD server (e.g. Apache), set the
+   `WSGIPassAuthorization` directive to allow the OAuth Authorization headers
+   to pass through `mod_wsgi`. For example, add the following to the Keystone
+   virtual host file::
+
+    WSGIPassAuthorization On
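+
+As a hypothetical smoke test once the extension is enabled (the admin port,
+token variable, and request body below are assumptions, not taken from the
+steps above), you can create an OAuth1 consumer:
+
+.. code-block:: bash
+
+    # Create a consumer through the OS-OAUTH1 API
+    $ curl -s -X POST http://localhost:35357/v3/OS-OAUTH1/consumers \
+        -H "X-Auth-Token: $OS_TOKEN" \
+        -H "Content-Type: application/json" \
+        -d '{"consumer": {"description": "test consumer"}}'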
diff --git a/keystone-moon/doc/source/extensions/openidc.rst b/keystone-moon/doc/source/extensions/openidc.rst
new file mode 100644 (file)
index 0000000..f515309
--- /dev/null
@@ -0,0 +1,93 @@
+:orphan:
+
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+====================
+Setup OpenID Connect
+====================
+
+Configuring mod_auth_openidc
+============================
+
+Federate Keystone (SP) and an external IdP using OpenID Connect (`mod_auth_openidc`_)
+
+.. _`mod_auth_openidc`: https://github.com/pingidentity/mod_auth_openidc
+
+To install `mod_auth_openidc` on Ubuntu, perform the following:
+
+.. code-block:: bash
+
+  sudo apt-get install libapache2-mod-auth-openidc
+
+Note that this module is not available on Fedora/CentOS/Red Hat.
+
+In the keystone Apache site file, add the following as a top-level option to
+load the `mod_auth_openidc` module:
+
+.. code-block:: xml
+
+  LoadModule auth_openidc_module /usr/lib/apache2/modules/mod_auth_openidc.so
+
+Also within the same file, locate the virtual host entry and add the following
+entries for OpenID Connect:
+
+.. code-block:: xml
+
+  <VirtualHost *:5000>
+
+      ...
+
+      OIDCClaimPrefix "OIDC-"
+      OIDCResponseType "id_token"
+      OIDCScope "openid email profile"
+      OIDCProviderMetadataURL <url_of_provider_metadata>
+      OIDCClientID <openid_client_id>
+      OIDCClientSecret <openid_client_secret>
+      OIDCCryptoPassphrase openstack
+      OIDCRedirectURI http://localhost:5000/v3/OS-FEDERATION/identity_providers/<idp_id>/protocols/oidc/auth/redirect
+
+      <LocationMatch /v3/OS-FEDERATION/identity_providers/.*?/protocols/oidc/auth>
+        AuthType openid-connect
+        Require valid-user
+        LogLevel debug
+      </LocationMatch>
+  </VirtualHost>
+
+Note that an example of an `OIDCProviderMetadataURL` instance is
+https://accounts.google.com/.well-known/openid-configuration.
+If `OIDCProviderMetadataURL` is not used, then the following attributes
+must be specified instead: `OIDCProviderIssuer`, `OIDCProviderAuthorizationEndpoint`,
+`OIDCProviderTokenEndpoint`, `OIDCProviderTokenEndpointAuth`,
+`OIDCProviderUserInfoEndpoint`, and `OIDCProviderJwksUri`.
+
+Note that if you are using a mod_wsgi version less than 4.3.0, the
+`OIDCClaimPrefix` must contain only alphanumerics or a dash ("-"), because
+mod_wsgi blocks headers that do not fit these criteria. See
+http://modwsgi.readthedocs.org/en/latest/release-notes/version-4.3.0.html#bugs-fixed
+for more details.
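+
+As an optional sanity check (not a required step for the module), you can
+verify that the metadata URL you configured is reachable and returns a JSON
+document:
+
+.. code-block:: bash
+
+    $ curl https://accounts.google.com/.well-known/openid-configuration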
+
+Once you are done, restart your Apache daemon:
+
+.. code-block:: bash
+
+    $ service apache2 restart
+
+Tips
+====
+
+1. When creating a mapping, note that the 'remote' attributes will be prefixed
+   with `HTTP_`, so for instance, if you set OIDCClaimPrefix to `OIDC-`, then a
+   typical remote value to check for is `HTTP_OIDC_ISS`.
+
+2. Don't forget to add `oidc` as an [auth] plugin in `keystone.conf`; see `Step 2`_.
+
+.. _`Step 2`: federation.html
\ No newline at end of file
diff --git a/keystone-moon/doc/source/extensions/revoke.rst b/keystone-moon/doc/source/extensions/revoke.rst
new file mode 100644 (file)
index 0000000..e8a25ce
--- /dev/null
@@ -0,0 +1,45 @@
+    ..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=================================
+Enabling the Revocation Extension
+=================================
+
+.. NOTE::
+
+    As of the Juno release, the example configuration files have the
+    ``OS-REVOKE`` extension enabled by default, so it is not necessary to
+    perform steps 1 and 2.
+    Also, for new installations, the revocation extension tables are already
+    migrated, so it is not necessary to perform step 3.
+
+1. Optionally, add the revoke extension driver to the ``[revoke]`` section
+   in ``keystone.conf``. For example::
+
+    [revoke]
+    driver = keystone.contrib.revoke.backends.sql.Revoke
+
+2. Add the required ``filter`` to the ``pipeline`` in ``keystone-paste.ini``.
+   This must be added after ``json_body`` and before the last entry in the
+   pipeline. For example::
+
+    [filter:revoke_extension]
+    paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory
+
+    [pipeline:api_v3]
+    pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension service_v3
+
+3. Create the revocation extension tables if using the provided SQL backend.
+   For example::
+
+    ./bin/keystone-manage db_sync --extension revoke
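+
+Once enabled, revocation events can be listed through the ``OS-REVOKE`` API.
+A hedged example, assuming an admin token in ``$OS_TOKEN`` and Keystone
+listening on the default admin port:
+
+.. code-block:: bash
+
+    # Returns {"events": [...]} describing recent revocation events
+    $ curl -s -H "X-Auth-Token: $OS_TOKEN" \
+        http://localhost:35357/v3/OS-REVOKE/events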
diff --git a/keystone-moon/doc/source/extensions/shibboleth.rst b/keystone-moon/doc/source/extensions/shibboleth.rst
new file mode 100644 (file)
index 0000000..d67cfa1
--- /dev/null
@@ -0,0 +1,279 @@
+:orphan:
+
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+================
+Setup Shibboleth
+================
+
+Configure Apache HTTPD for mod_shibboleth
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Follow the steps outlined at: `Running Keystone in HTTPD`_.
+
+.. _`Running Keystone in HTTPD`: ../apache-httpd.html
+
+You'll also need to install `Shibboleth <https://wiki.shibboleth.net/confluence/display/SHIB2/Home>`_, for
+example:
+
+.. code-block:: bash
+
+    $ apt-get install libapache2-mod-shib2
+
+Configure your Keystone virtual host and adjust the config to properly handle SAML2 workflow:
+
+Add a *WSGIScriptAlias* directive to your vhost configuration::
+
+    WSGIScriptAliasMatch ^(/v3/OS-FEDERATION/identity_providers/.*?/protocols/.*?/auth)$ /var/www/keystone/main/$1
+
+Make sure the *wsgi-keystone.conf* contains a *<Location>* directive for the Shibboleth module and
+a *<Location>* directive for each identity provider::
+
+    <Location /Shibboleth.sso>
+        SetHandler shib
+    </Location>
+
+    <Location /v3/OS-FEDERATION/identity_providers/idp_1/protocols/saml2/auth>
+        ShibRequestSetting requireSession 1
+        ShibRequestSetting applicationId idp_1
+        AuthType shibboleth
+        ShibRequireAll On
+        ShibRequireSession On
+        ShibExportAssertion Off
+        Require valid-user
+    </Location>
+
+.. NOTE::
+    * ``saml2`` may be different in your deployment, but do not use a wildcard value.
+      Otherwise *every* federated protocol will be handled by Shibboleth.
+    * ``idp_1`` has to be replaced with the name associated with the IdP in
+      Keystone. The same name is typically used inside the shibboleth2.xml
+      configuration file, but the two names could differ.
+    * The ``ShibRequireSession`` and ``ShibRequireAll`` rules are invalid in
+      Apache 2.4+ and should be dropped in that specific setup.
+    * You are advised to carefully examine the `Shibboleth Apache configuration
+      documentation
+      <https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPApacheConfig>`_.
+
+Enable the Keystone virtual host, for example:
+
+.. code-block:: bash
+
+    $ a2ensite wsgi-keystone.conf
+
+Enable the ``ssl`` and ``shib2`` modules, for example:
+
+.. code-block:: bash
+
+    $ a2enmod ssl
+    $ a2enmod shib2
+
+Restart Apache, for example:
+
+.. code-block:: bash
+
+    $ service apache2 restart
+
+Configuring shibboleth2.xml
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once you have your Keystone vhost (virtual host) ready, it's then time to
+configure Shibboleth and upload your Metadata to the Identity Provider.
+
+If new certificates are required, they can be easily created by executing:
+
+.. code-block:: bash
+
+    $ shib-keygen -y <number of years>
+
+The newly created key will be stored at ``/etc/shibboleth/sp-key.pem``.
+
+Next, fetch your Service Provider's Metadata file. Typically this can be
+achieved with a simple HTTP request, for example:
+
+.. code-block:: bash
+
+    $ wget --no-check-certificate -O <name of the file> https://service.example.org/Shibboleth.sso/Metadata
+
+Upload your Service Provider's Metadata file to your Identity Provider.
+This step depends on your Identity Provider choice and is not covered here.
+
+Configure your Service Provider by editing the
+``/etc/shibboleth/shibboleth2.xml`` file. You are advised to examine the
+`Shibboleth Service Provider Configuration documentation <https://wiki.shibboleth.net/confluence/display/SHIB2/Configuration>`_.
+
+An example ``/etc/shibboleth/shibboleth2.xml`` may look like the following
+(the example shown below is for reference only, and is not to be used in a
+production environment):
+
+.. code-block:: xml
+
+    <!--
+    File configuration courtesy of http://testshib.org
+
+    More  information:
+    https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPConfiguration
+    -->
+
+    <SPConfig xmlns="urn:mace:shibboleth:2.0:native:sp:config"
+    xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" clockSkew="1800">
+
+        <!-- The entityID is the name TestShib made for your SP. -->
+        <ApplicationDefaults entityID="https://<yourhosthere>/shibboleth">
+
+            <!--
+            You should use secure cookies if at all possible.
+            See cookieProps in this Wiki article.
+            -->
+            <!-- https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPSessions  -->
+            <Sessions lifetime="28800" timeout="3600" checkAddress="false"
+            relayState="ss:mem" handlerSSL="false">
+
+                <!-- Triggers a login request directly to the TestShib IdP. -->
+                <!-- https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPServiceSSO -->
+                <SSO entityID="https://<idp-url>/idp/shibboleth" ECP="true">
+                    SAML2 SAML1
+                </SSO>
+
+                <!-- SAML and local-only logout. -->
+                <!-- https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPServiceLogout -->
+                <Logout>SAML2 Local</Logout>
+
+                <!--
+                Handlers allow you to interact with the SP and gather
+                more information. Try them out!
+                Attribute values received by the SP through SAML
+                will be visible at:
+                http://<yourhosthere>/Shibboleth.sso/Session
+                -->
+
+                <!--
+                Extension service that generates "approximate" metadata
+                based on SP configuration.
+                -->
+                <Handler type="MetadataGenerator" Location="/Metadata"
+                signing="false"/>
+
+                <!-- Status reporting service. -->
+                <Handler type="Status" Location="/Status"
+                acl="127.0.0.1"/>
+
+                <!-- Session diagnostic service. -->
+                <Handler type="Session" Location="/Session"
+                showAttributeValues="true"/>
+                <!-- JSON feed of discovery information. -->
+                <Handler type="DiscoveryFeed" Location="/DiscoFeed"/>
+            </Sessions>
+
+            <!--
+            Error pages to display to yourself if
+            something goes horribly wrong.
+            -->
+            <Errors supportContact="<admin_email_address>"
+                logoLocation="/shibboleth-sp/logo.jpg"
+                styleSheet="/shibboleth-sp/main.css"/>
+
+            <!--
+            Loads and trusts a metadata file that describes only one IdP
+            and how to communicate with it.
+            -->
+            <MetadataProvider type="XML" uri="<idp-metadata-file>"
+                 backingFilePath="<local idp metadata>"
+                 reloadInterval="180000" />
+
+            <!-- Attribute and trust options you shouldn't need to change. -->
+            <AttributeExtractor type="XML" validate="true"
+            path="attribute-map.xml"/>
+            <AttributeResolver type="Query" subjectMatch="true"/>
+            <AttributeFilter type="XML" validate="true"
+            path="attribute-policy.xml"/>
+
+            <!--
+            Your SP generated these credentials.
+            They're used to talk to IdP's.
+            -->
+            <CredentialResolver type="File" key="sp-key.pem"
+            certificate="sp-cert.pem"/>
+
+            <ApplicationOverride id="idp_1" entityID="https://<yourhosthere>/shibboleth">
+               <Sessions lifetime="28800" timeout="3600" checkAddress="false"
+               relayState="ss:mem" handlerSSL="false">
+
+                <!-- Triggers a login request directly to the TestShib IdP. -->
+                <SSO entityID="https://<idp_1-url>/idp/shibboleth" ECP="true">
+                    SAML2 SAML1
+                </SSO>
+
+                <Logout>SAML2 Local</Logout>
+               </Sessions>
+
+               <MetadataProvider type="XML" uri="<idp_1-metadata-file>"
+                 backingFilePath="<local idp_1 metadata>"
+                 reloadInterval="180000" />
+
+            </ApplicationOverride>
+
+            <ApplicationOverride id="idp_2" entityID="https://<yourhosthere>/shibboleth">
+               <Sessions lifetime="28800" timeout="3600" checkAddress="false"
+               relayState="ss:mem" handlerSSL="false">
+
+                <!-- Triggers a login request directly to the TestShib IdP. -->
+                <SSO entityID="https://<idp_2-url>/idp/shibboleth" ECP="true">
+                    SAML2 SAML1
+                </SSO>
+
+                <Logout>SAML2 Local</Logout>
+               </Sessions>
+
+               <MetadataProvider type="XML" uri="<idp_2-metadata-file>"
+                 backingFilePath="<local idp_2 metadata>"
+                 reloadInterval="180000" />
+
+            </ApplicationOverride>
+
+        </ApplicationDefaults>
+
+        <!--
+        Security policies you shouldn't change unless you
+        know what you're doing.
+        -->
+        <SecurityPolicyProvider type="XML" validate="true"
+        path="security-policy.xml"/>
+
+        <!--
+        Low-level configuration about protocols and bindings
+        available for use.
+        -->
+        <ProtocolProvider type="XML" validate="true" reloadChanges="false"
+        path="protocols.xml"/>
+
+    </SPConfig>
+
+Keystone enforces `external authentication`_ when the ``REMOTE_USER``
+environment variable is present, so make sure Shibboleth doesn't set the
+``REMOTE_USER`` environment variable. To do so, scan through the
+``/etc/shibboleth/shibboleth2.xml`` configuration file and remove the
+``REMOTE_USER`` directives.
+
+Examine your attribute map file ``/etc/shibboleth/attribute-map.xml`` and
+adjust it to your requirements if needed. For more information see the
+`attributes documentation <https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPAddAttribute>`_.
+
+Once you are done, restart your Shibboleth daemon:
+
+.. _`external authentication`: ../external-auth.html
+
+.. code-block:: bash
+
+    $ service shibd restart
+    $ service apache2 restart
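+
+Optionally, you can confirm the Service Provider is responding by querying the
+``Status`` handler defined in the example configuration above. Its ``acl``
+only allows requests from 127.0.0.1, and the port below assumes the Keystone
+vhost shown earlier:
+
+.. code-block:: bash
+
+    $ curl http://localhost:5000/Shibboleth.sso/Status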
diff --git a/keystone-moon/doc/source/external-auth.rst b/keystone-moon/doc/source/external-auth.rst
new file mode 100644 (file)
index 0000000..5f3c9af
--- /dev/null
@@ -0,0 +1,155 @@
+===========================================
+Using external authentication with Keystone
+===========================================
+.. _external-auth:
+
+When Keystone is executed in a web server like :doc:`Apache HTTPD
+<apache-httpd>`, it is possible to use external authentication methods
+different from the authentication provided by the identity store backend or
+the authentication plugins. For example, this makes it possible to use an SQL
+identity backend together with X.509 or Kerberos authentication, instead of
+the usual username and password combination.
+
+When a web server is in charge of authentication, it is normally possible to
+set the ``REMOTE_USER`` environment variable so that it can be used in the
+underlying application. Keystone can be configured to use that environment
+variable if set, so that the authentication is handled by the web server.
+
+Configuration
+=============
+
+In Identity API v2, there is no way to disable external authentication. In
+order to activate the external authentication mechanism for Identity API v3,
+the ``external`` method must be in the list of enabled authentication methods.
+By default it is enabled, so if you don't want to use external authentication,
+remove it from the ``methods`` option in the ``auth`` section.
+
+To configure the plugin that should be used, set the ``external`` option in
+the ``auth`` section. There are two external authentication method plugins
+provided by Keystone:
+
+* ``keystone.auth.plugins.external.Default``: This plugin won't take into
+  account the domain information that the external authentication method may
+  pass down to Keystone and will always use the configured default domain. The
+  ``REMOTE_USER`` variable is the username.
+
+* ``keystone.auth.plugins.external.Domain``: This plugin expects that the
+  ``REMOTE_DOMAIN`` variable contains the domain for the user. If this variable
+  is not present, the configured default domain will be used. The
+  ``REMOTE_USER`` variable is the username.
+
+Using HTTPD authentication
+==========================
+
+Web servers like Apache HTTPD support many methods of authentication. Keystone
+can take advantage of this and let the authentication be done by the web
+server, which will pass the authenticated user down to Keystone using the
+``REMOTE_USER`` environment variable. This user must already exist in the
+identity backend in order to get a token from the controller.
+
+To use this method, Keystone should be running on :doc:`HTTPD <apache-httpd>`.
+
+X.509 example
+-------------
+
+The following snippet for the Apache conf will authenticate the user based on
+a valid X.509 certificate from a known CA::
+
+    <VirtualHost _default_:5000>
+        SSLEngine on
+        SSLCertificateFile    /etc/ssl/certs/ssl.cert
+        SSLCertificateKeyFile /etc/ssl/private/ssl.key
+
+        SSLCACertificatePath /etc/ssl/allowed_cas
+        SSLCARevocationPath  /etc/ssl/allowed_cas
+        SSLUserName          SSL_CLIENT_S_DN_CN
+        SSLVerifyClient      require
+        SSLVerifyDepth       10
+
+        (...)
+    </VirtualHost>
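+
+With this virtual host in place, a client presenting a valid certificate can
+request a token using the ``external`` authentication method. The certificate
+paths, hostname, and request body below are illustrative assumptions:
+
+.. code-block:: bash
+
+    $ curl -i https://keystone:5000/v3/auth/tokens \
+        --cert client-cert.pem --key client-key.pem --cacert ca.pem \
+        -H "Content-Type: application/json" \
+        -d '{"auth": {"identity": {"methods": ["external"], "external": {}}}}'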
+
+Developing a WSGI middleware for authentication
+===============================================
+
+In addition to the method described above, it is possible to implement other
+custom authentication mechanisms using the ``REMOTE_USER`` WSGI environment
+variable.
+
+.. ATTENTION::
+
+    Please note that even if it is possible to develop a custom authentication
+    module, it is preferable to use the modules in the HTTPD server. Such
+    authentication modules in webservers like Apache have normally undergone
+    years of development and use in production systems and are actively
+    maintained upstream. Developing a custom authentication module that
+    implements the same authentication as an existing Apache module likely
+    introduces a higher security risk.
+
+If you find you must implement a custom authentication mechanism, you will need
+to develop a custom WSGI middleware pipeline component. This middleware should
+set the environment variable ``REMOTE_USER`` to the authenticated username.
+Keystone will then assume that the user has already been authenticated
+upstream and will not try to authenticate them again. However, as with HTTPD
+authentication, the user must already exist in the identity backend so that a
+proper token can be issued.
+
+Your code should set the ``REMOTE_USER`` if the user is properly authenticated,
+following the semantics below:
+
+.. code-block:: python
+
+    from keystone.common import wsgi
+    from keystone import exception
+
+    class MyMiddlewareAuth(wsgi.Middleware):
+        def __init__(self, *args, **kwargs):
+            super(MyMiddlewareAuth, self).__init__(*args, **kwargs)
+
+        def process_request(self, request):
+            if request.environ.get('REMOTE_USER', None) is not None:
+                # Assume that it is authenticated upstream
+                return self.application
+
+            if not self.is_auth_applicable(request):
+                # Not applicable
+                return self.application
+
+            username = self.do_auth(request)
+            if username is not None:
+                # User is authenticated
+                request.environ['REMOTE_USER'] = username
+            else:
+                # User is not authenticated, render exception
+                raise exception.Unauthorized("Invalid user")
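+
+Note that ``is_auth_applicable`` and ``do_auth`` above are placeholders for
+your deployment-specific logic; they are not provided by Keystone.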
+
+
+Pipeline configuration
+----------------------
+
+Once you have your WSGI middleware component developed you have to add it to
+your pipeline. The first step is to add the middleware to your configuration
+file. Assuming that your middleware module is
+``keystone.middleware.MyMiddlewareAuth``, you can configure it in your
+``keystone-paste.ini`` as::
+
+    [filter:my_auth]
+    paste.filter_factory = keystone.middleware.MyMiddlewareAuth.factory
+
+The second step is to add your middleware to the pipeline. The exact position
+will depend on your code (e.g. if the request body must be converted from
+JSON before the authentication is performed, place it after the ``json_body``
+filter), but it should come before ``public_service`` (for the ``public_api``
+pipeline) or ``admin_service`` (for the ``admin_api`` pipeline), since they
+consume the authentication.
+
+For example, if the original pipeline looks like this::
+
+    [pipeline:public_api]
+    pipeline = url_normalize token_auth admin_token_auth json_body debug ec2_extension user_crud_extension public_service
+
+Your modified pipeline might then look like this::
+
+    [pipeline:public_api]
+    pipeline = url_normalize token_auth admin_token_auth json_body my_auth debug ec2_extension user_crud_extension public_service
diff --git a/keystone-moon/doc/source/http-api.rst b/keystone-moon/doc/source/http-api.rst
new file mode 100644 (file)
index 0000000..a104ce3
--- /dev/null
@@ -0,0 +1,227 @@
+..
+    Licensed under the Apache License, Version 2.0 (the "License"); you may not
+    use this file except in compliance with the License. You may obtain a copy
+    of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+    License for the specific language governing permissions and limitations
+    under the License.
+
+========
+HTTP API
+========
+
+Specifications
+==============
+
+Keystone implements two major HTTP API versions, along with several API
+extensions that build on top of each core API. The two APIs are specified as
+`Identity API v2.0`_ and `Identity API v3`_. Each API is specified by a single
+source of truth to avoid conflicts between documentation and implementation.
+The original source of truth for the v2.0 API is defined by a set of WADL and
+XSD files. The original source of truth for the v3 API is defined by
+documentation.
+
+.. _`Identity API v2.0`: https://github.com/openstack/identity-api/tree/master/v2.0/src
+.. _`Identity API v3`: https://github.com/openstack/identity-api/tree/master/v3/src/markdown
+
+History
+=======
+
+You're probably wondering why Keystone does not implement a "v1" API. As a
+matter of fact, one exists, but it actually predates OpenStack. The v1.x API
+was an extremely small API documented and implemented by Rackspace for their
+early public cloud products.
+
+With the advent of OpenStack, Keystone served to provide a superset of the
+authentication and multi-tenant authorization models already implemented by
+Rackspace's public cloud, Nova, and Swift. Thus, Identity API v2.0 was
+introduced.
+
+Identity API v3 was established to introduce namespacing for users and projects
+by using "domains" as a higher-level container for more flexible identity
+management and fixed a security issue in the v2.0 API (bearer tokens appearing
+in URLs).
+
+Should I use v2.0 or v3?
+========================
+
+Identity API v3.
+
+Identity API v3 is a superset of all the functionality available in v2.0 and
+several of its extensions, and provides a much more consistent developer
+experience to boot. We're also on the road to deprecating, and ultimately
+reducing (or dropping) support for, Identity API v2.0.
+
+How do I migrate from v2.0 to v3?
+=================================
+
+I am a deployer
+---------------
+
+You'll need to ensure the v3 API is included in your Paste pipeline, usually
+``etc/keystone-paste.ini``. Our `latest sample configuration`_ includes the v3
+application pipeline.
+
+First define a v3 application, which refers to the v3 application factory
+method:
+
+.. code-block:: ini
+
+    [app:service_v3]
+    paste.app_factory = keystone.service:v3_app_factory
+
+Then define a v3 pipeline, which terminates with the v3 application you defined
+above:
+
+.. code-block:: ini
+
+    [pipeline:api_v3]
+    pipeline = ... service_v3
+
+Replace "..." with whatever middleware you'd like to run in front of the API
+service. Our `latest sample configuration`_ documents our tested
+recommendations, but your requirements may vary.
+
+Finally, include the v3 pipeline in at least one ``composite`` application (but
+usually both ``[composite:main]`` and ``[composite:admin]``), for example:
+
+.. code-block:: ini
+
+    [composite:main]
+    use = egg:Paste#urlmap
+    /v3 = api_v3
+    ...
+
+Once your pipeline is configured to expose both v2.0 and v3, you need to ensure
+that you've configured your service catalog in Keystone correctly. The
+simplest, and most ideal, configuration would expose one ``identity`` service
+with unversioned endpoints (note the lack of ``/v2.0/`` or ``/v3/`` in these
+URLs):
+
+- Service (type: ``identity``)
+
+  - Endpoint (interface: ``public``, URL: ``http://identity:5000/``)
+  - Endpoint (interface: ``admin``, URL: ``http://identity:35357/``)
+
+If you were to perform a ``GET`` against either of these endpoints, you would
+be greeted by an ``HTTP/1.1 300 Multiple Choices`` response, which newer
+Keystone clients can use to automatically detect available API versions.
+
+.. code-block:: bash
+
+    $ curl -i http://identity:35357/
+    HTTP/1.1 300 Multiple Choices
+    Vary: X-Auth-Token
+    Content-Type: application/json
+    Content-Length: 755
+    Date: Tue, 10 Jun 2014 14:22:26 GMT
+
+    {"versions": {"values": [ ... ]}}
+
+With unversioned ``identity`` endpoints in the service catalog, you should be
+able to `authenticate with keystoneclient`_ successfully.
+
+.. _`latest sample configuration`: https://github.com/openstack/keystone/blob/master/etc/keystone-paste.ini
+.. _`authenticate with keystoneclient`: http://docs.openstack.org/developer/python-keystoneclient/using-api-v3.html#authenticating
+
+I have a Python client
+----------------------
+
+The Keystone community provides first-class support for Python API consumers
+via our client library, `python-keystoneclient`_. If you're not currently using
+this library, you should, as it is intended to expose all of our HTTP API
+functionality. If we're missing something you're looking for, please
+contribute!
+
+Adopting `python-keystoneclient`_ should be the easiest way to migrate to
+Identity API v3.
+
+.. _`python-keystoneclient`: https://pypi.python.org/pypi/python-keystoneclient/
+
+I have a non-Python client
+--------------------------
+
+You'll likely need to heavily reference our `API documentation`_ to port your
+application to Identity API v3.
+
+.. _`API documentation`: https://github.com/openstack/identity-api/blob/master/v3/src/markdown/identity-api-v3.md
+
+The most common operation would be password-based authentication including a
+tenant name (i.e. project name) to specify an authorization scope. In Identity
+API v2.0, this would be a request to ``POST /v2.0/tokens``:
+
+.. code-block:: javascript
+
+    {
+        "auth": {
+            "passwordCredentials": {
+                "password": "my-password",
+                "username": "my-username"
+            },
+            "tenantName": "project-x"
+        }
+    }
+
+And you would get back a JSON blob with an ``access`` -> ``token`` -> ``id``
+that you could pass to another web service as your ``X-Auth-Token`` header
+value.
+
+In Identity API v3, an equivalent request would be to ``POST /v3/auth/tokens``:
+
+.. code-block:: javascript
+
+    {
+        "auth": {
+            "identity": {
+                "methods": [
+                    "password"
+                ],
+                "password": {
+                    "user": {
+                        "domain": {
+                            "id": "default"
+                        },
+                        "name": "my-username",
+                        "password": "my-password"
+                    }
+                }
+            },
+            "scope": {
+                "project": {
+                    "domain": {
+                        "id": "default"
+                    },
+                    "name": "project-x"
+                }
+            }
+        }
+    }
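+
+As an illustrative sketch, this request could be submitted with curl (the
+endpoint comes from the catalog example above; ``request.json`` is a
+hypothetical file holding the body shown):
+
+.. code-block:: bash
+
+    $ curl -i -H "Content-Type: application/json" \
+        -d @request.json http://identity:5000/v3/auth/tokens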
+
+Note a few key differences when compared to the v2.0 API:
+
+- A "tenant" in v2.0 became a "project" in v3.
+- The authentication method (``password``) is explicitly identified.
+- Both the user name (``my-username``) and project name (``project-x``) are
+  namespaced by an owning domain (where ``id`` = ``default``). The "default"
+  domain exists by default in Keystone, and automatically owns the namespace
+  exposed by Identity API v2.0. Alternatively, you may reference users and
+  projects that exist outside the namespace of the default domain, which are
+  thus inaccessible to the v2.0 API.
+- In v3, your token is returned to you in an ``X-Subject-Token`` header,
+  instead of as part of the request body. You should still authenticate
+  yourself to other services using the ``X-Auth-Token`` header.
+
+
+HTTP/1.1 Chunked Encoding
+=========================
+.. WARNING::
+
+    Running Keystone under HTTPD in the recommended (and tested) configuration does not support
+    the use of ``Transfer-Encoding: chunked``. This is due to a limitation with the WSGI spec
+    and the implementation used by ``mod_wsgi``. Support for chunked encoding under ``eventlet``
+    may or may not continue. It is recommended that all clients assume Keystone will not support
+    ``Transfer-Encoding: chunked``.
diff --git a/keystone-moon/doc/source/index.rst b/keystone-moon/doc/source/index.rst
new file mode 100644 (file)
index 0000000..48129a8
--- /dev/null
@@ -0,0 +1,97 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+====================================================
+Welcome to Keystone, the OpenStack Identity Service!
+====================================================
+
+Keystone is an OpenStack project that provides Identity, Token, Catalog and
+Policy services for use specifically by projects in the OpenStack family.
+It implements `OpenStack's Identity API`_.
+
+This document describes Keystone for contributors of the project, and assumes
+that you are already familiar with Keystone from an `end-user perspective`_.
+
+.. _`OpenStack's Identity API`: http://specs.openstack.org/openstack/keystone-specs/
+.. _`end-user perspective`: http://docs.openstack.org/
+
+This documentation is generated by the Sphinx toolkit and lives in the source
+tree. Also see the :doc:`community` page for other ways to interact with the
+community.
+
+Related Identity Projects
+=========================
+
+In addition to creating OpenStack's Identity Service, the Keystone team also
+provides `WSGI middleware`_, as well as a `Python client library`_.
+
+.. _`WSGI middleware`: http://docs.openstack.org/developer/keystonemiddleware/
+.. _`Python client library`: http://docs.openstack.org/developer/python-keystoneclient/
+
+Getting Started
+===============
+
+.. toctree::
+    :maxdepth: 1
+
+    setup
+    installing
+    configuration
+    configure_federation
+    configuringservices
+    extensions
+    key_terms
+    community
+
+Man Pages
+=========
+
+.. toctree::
+    :maxdepth: 1
+
+    man/keystone-all
+    man/keystone-manage
+
+Developers Documentation
+========================
+.. toctree::
+   :maxdepth: 1
+
+   developing
+   architecture
+   middlewarearchitecture
+   http-api
+   api_curl_examples
+   cli_examples
+   apache-httpd
+   external-auth
+   event_notifications
+   extension_development
+
+Code Documentation
+==================
+.. toctree::
+   :maxdepth: 1
+
+   api/modules
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/keystone-moon/doc/source/installing.rst b/keystone-moon/doc/source/installing.rst
new file mode 100644 (file)
index 0000000..0492da7
--- /dev/null
@@ -0,0 +1,125 @@
+..
+      Copyright 2012 OpenStack Foundation
+      Copyright 2012 Nebula, Inc
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+===================
+Installing Keystone
+===================
+
+This document describes how to install Keystone in order to use it. If you are
+intending to develop on or with Keystone, please read :doc:`developing` and
+:doc:`setup`.
+
+Installing from Source
+----------------------
+
+The source install instructions specifically avoid using platform specific
+packages, instead using the source for the code and the Python Package Index
+(PyPi_).
+
+.. _PyPi: http://pypi.python.org/pypi
+
+It's expected that your system already has python_, pip_, and git_ available.
+
+.. _python: http://www.python.org
+.. _pip: http://www.pip-installer.org/en/latest/installing.html
+.. _git: http://git-scm.com/
+
+Clone the Keystone repository:
+
+.. code-block:: bash
+
+    $ git clone http://github.com/openstack/keystone.git
+    $ cd keystone
+
+Install the Keystone web service:
+
+.. code-block:: bash
+
+    $ python setup.py install
+
+You should have all the pieces you need to run Keystone installed on your
+system. The following commands should be available on the command-line path:
+
+* ``keystone``, the Keystone client, used to interact with Keystone
+* ``keystone-manage``, used to bootstrap Keystone data
+* ``keystone-all``, used to run the Keystone services
+
+You will find sample configuration files in ``etc/``:
+
+* ``keystone.conf``
+* ``keystone-paste.ini``
+* ``logging.conf``
+* ``policy.json``
+* ``default_catalog.templates``
+
+From here, refer to :doc:`configuration` to choose which backend drivers to
+enable and use. Once configured, you should be able to run Keystone by issuing
+the command:
+
+.. code-block:: bash
+
+    $ keystone-all
+
+By default, this will show logging on the console from which it was started.
+Once started, you can initialize data in Keystone for use with the rest of
+OpenStack, as described in :doc:`configuringservices`.
+
+An excellent reference implementation for setting up Keystone is DEVSTACK_,
+most commonly used for development and testing setups of not only Keystone,
+but all of the core OpenStack projects.
+
+.. _DEVSTACK: http://devstack.org/
+
+The script with the latest examples of initializing data in Keystone is a
+bash script called keystone_data.sh_.
+
+.. _keystone_data.sh: https://github.com/openstack-dev/devstack/blob/master/files/keystone_data.sh
+
+Installing from packages: Ubuntu
+--------------------------------
+
+Ubuntu provides packages for Keystone as of Precise. To install Keystone
+on Ubuntu:
+
+.. code-block:: bash
+
+    $ sudo apt-get install keystone
+
+Ubuntu's packages will set up a user account for the Keystone service
+(`keystone`) and place default configurations in ``/etc/keystone``. The
+Debian installer will also ask you about configuration options for setting up
+and running Keystone. As of this writing, the defaults for the Keystone
+backends are all SQL based, stored locally in a SQLite database.
+
+Once installed, you still need to initialize data in Keystone, which you can
+find described in :doc:`configuringservices`.
+
+Installing from packages: Fedora
+--------------------------------
+
+Installing Keystone with Fedora 17 is documented at
+http://fedoraproject.org/wiki/Getting_started_with_OpenStack_on_Fedora_17.
+
+To install the packages:
+
+.. code-block:: bash
+
+    $ sudo yum install --enablerepo=updates-testing openstack-keystone
+
+Once installed, you can configure Keystone based on the instructions at:
+
+http://fedoraproject.org/wiki/Getting_started_with_OpenStack_on_Fedora_17#Configuring_Keystone_for_authentication
diff --git a/keystone-moon/doc/source/key_terms.rst b/keystone-moon/doc/source/key_terms.rst
new file mode 100644 (file)
index 0000000..93aec53
--- /dev/null
@@ -0,0 +1,185 @@
+=========
+Key Terms
+=========
+
+This document describes the different resource types that are available in
+OpenStack's Identity Service.
+
+Identity
+========
+
+The Identity portion of keystone includes ``Users`` and ``Groups``, and may be
+backed by SQL or more commonly LDAP.
+
+Users
+-----
+
+``Users`` represent an individual API consumer. A user itself must be owned by
+a specific domain, and hence all user names are **not** globally unique, but
+only unique to their domain.
+
+Groups
+------
+
+``Groups`` are a container representing a collection of users. A group itself
+must be owned by a specific domain, and hence all group names are **not**
+globally unique, but only unique to their domain.
+
+Resources
+=========
+
+The Resources portion of keystone includes ``Projects`` and ``Domains``, which
+are commonly stored in an SQL backend.
+
+Projects (Tenants)
+------------------
+
+``Projects`` (known as Tenants in v2.0) represent the base unit of
+``ownership`` in OpenStack, in that all resources in OpenStack should be owned
+by a specific project.
+A project itself must be owned by a specific domain, and hence all project
+names are **not** globally unique, but unique to their domain.
+If the domain for a project is not specified, then it is added to the default
+domain.
+
+Domains
+-------
+
+``Domains`` are a high-level container for projects, users and groups. Each is
+owned by exactly one domain. Each domain defines a namespace where an
+API-visible name attribute exists. keystone provides a default domain, aptly
+named 'Default'.
+
+In the Identity v3 API, the uniqueness of attributes is as follows:
+
+- Domain Name. Globally unique across all domains.
+
+- Role Name. Globally unique across all domains.
+
+- User Name. Unique within the owning domain.
+
+- Project Name. Unique within the owning domain.
+
+- Group Name. Unique within the owning domain.
+
+Due to their container architecture, domains may be used as a way to delegate
+management of OpenStack resources. A user in a domain may still access
+resources in another domain, if an appropriate assignment is granted.
+
+
+Assignment
+==========
+
+Roles
+-----
+
+``Roles`` dictate the level of authorization the end user can obtain. Roles
+can be granted at either the domain or project level. A role can be assigned
+to an individual user or at the group level. Role names are globally unique.
+
+Role Assignments
+----------------
+
+A 3-tuple that has a ``Role``, a ``Resource`` and an ``Identity``.
+
+What's needed to Authenticate?
+==============================
+
+Two pieces of information are required to authenticate with keystone: a
+bit of ``Resource`` information and a bit of ``Identity``.
+
+Take the following authentication POST data, for instance:
+
+.. code-block:: javascript
+
+    {
+        "auth": {
+            "identity": {
+                "methods": [
+                    "password"
+                ],
+                "password": {
+                    "user": {
+                        "id": "0ca8f6",
+                        "password": "secretsecret"
+                    }
+                }
+            },
+            "scope": {
+                "project": {
+                    "id": "263fd9"
+                }
+            }
+        }
+    }
+
+The user (ID of 0ca8f6) is attempting to retrieve a token that is scoped to
+project (ID of 263fd9).
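+
+For illustration, such a request could be submitted as follows (the endpoint
+URL is an assumption about the deployment):
+
+.. code-block:: bash
+
+    $ curl -i -H "Content-Type: application/json" \
+        -d '{"auth": {"identity": {"methods": ["password"],
+             "password": {"user": {"id": "0ca8f6", "password": "secretsecret"}}},
+             "scope": {"project": {"id": "263fd9"}}}}' \
+        http://localhost:5000/v3/auth/tokens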
+
+To perform the same call with names instead of IDs, we now need to supply
+information about the domain. This is because usernames are only unique within
+a given domain, but user IDs are supposed to be unique across the deployment.
+Thus, the auth request looks like the following:
+
+.. code-block:: javascript
+
+    {
+        "auth": {
+            "identity": {
+                "methods": [
+                    "password"
+                ],
+                "password": {
+                    "user": {
+                        "domain": {
+                            "name": "acme"
+                        },
+                        "name": "userA",
+                        "password": "secretsecret"
+                    }
+                }
+            },
+            "scope": {
+                "project": {
+                    "domain": {
+                        "id": "1789d1"
+                    },
+                    "name": "project-x"
+                }
+            }
+        }
+    }
+
+For both the user and the project portion, we must supply either a domain ID
+or a domain name, in order to properly determine the correct user and project.
+
+Alternatively, if we wanted to represent this as environment variables for a
+command line, it would be:
+
+.. code-block:: bash
+
+    $ export OS_PROJECT_DOMAIN_ID=1789d1
+    $ export OS_USER_DOMAIN_NAME=acme
+    $ export OS_USERNAME=userA
+    $ export OS_PASSWORD=secretsecret
+    $ export OS_PROJECT_NAME=project-x
+
+Note that the project the user is attempting to access must be in the same
+domain as the user.
+
+What is Scope?
+==============
+
+Scope is an overloaded term.
+
+In reference to authenticating, as seen above, scope refers to the portion
+of the POST data that dictates what ``Resource`` (project or domain) the user
+wants to access.
+
+In reference to tokens, scope refers to the effectiveness of a token,
+i.e.: a `project-scoped` token is only useful on the project it was initially
+granted for. A `domain-scoped` token may be used to perform domain-related
+functions.
+
+In reference to users, groups, and projects, scope often refers to the domain
+that the entity is owned by, e.g. a user in domain X is scoped to domain X.
diff --git a/keystone-moon/doc/source/man/keystone-all.rst b/keystone-moon/doc/source/man/keystone-all.rst
new file mode 100644 (file)
index 0000000..328b0c4
--- /dev/null
@@ -0,0 +1,112 @@
+============
+keystone-all
+============
+
+------------------------
+Keystone Startup Command
+------------------------
+
+:Author: openstack@lists.openstack.org
+:Date:   2014-10-16
+:Copyright: OpenStack Foundation
+:Version: 2014.2
+:Manual section: 1
+:Manual group: cloud computing
+
+SYNOPSIS
+========
+
+::
+
+  keystone-all [-h] [--config-dir DIR] [--config-file PATH] [--debug]
+                    [--log-config-append PATH] [--log-date-format DATE_FORMAT]
+                    [--log-dir LOG_DIR] [--log-file PATH]
+                    [--log-format FORMAT] [--nodebug] [--nostandard-threads]
+                    [--nouse-syslog] [--nouse-syslog-rfc-format] [--noverbose]
+                    [--pydev-debug-host PYDEV_DEBUG_HOST]
+                    [--pydev-debug-port PYDEV_DEBUG_PORT] [--standard-threads]
+                    [--syslog-log-facility SYSLOG_LOG_FACILITY] [--use-syslog]
+                    [--use-syslog-rfc-format] [--verbose] [--version]
+
+DESCRIPTION
+===========
+
+keystone-all starts both the service and administrative APIs in a single
+process to provide catalog, authorization, and authentication services for
+OpenStack.
+
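+For example, to start the services using a specific configuration file (the
+path shown is illustrative)::
+
+  keystone-all --config-file /etc/keystone/keystone.conf
+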
+OPTIONS
+=======
+
+  -h, --help            show this help message and exit
+  --config-dir DIR      Path to a config directory to pull \*.conf files from.
+                        This file set is sorted, so as to provide a
+                        predictable parse order if individual options are
+                        over-ridden. The set is parsed after the file(s)
+                        specified via previous --config-file arguments, hence
+                        over-ridden options in the directory take precedence.
+  --config-file PATH    Path to a config file to use. Multiple config files
+                        can be specified, with values in later files taking
+                        precedence. The default files used are: None.
+  --debug, -d           Print debugging output (set logging level to DEBUG
+                        instead of default WARNING level).
+  --log-config-append PATH, --log_config PATH
+                        The name of a logging configuration file. This file is
+                        appended to any existing logging configuration files.
+                        For details about logging configuration files, see the
+                        Python logging module documentation.
+  --log-date-format DATE_FORMAT
+                        Format string for %(asctime)s in log records. Default:
+                        None.
+  --log-dir LOG_DIR, --logdir LOG_DIR
+                        (Optional) The base directory used for relative --log-
+                        file paths.
+  --log-file PATH, --logfile PATH
+                        (Optional) Name of log file to output to. If no
+                        default is set, logging will go to stdout.
+  --log-format FORMAT   DEPRECATED. A logging.Formatter log message format
+                        string which may use any of the available
+                        logging.LogRecord attributes. This option is
+                        deprecated. Please use logging_context_format_string
+                        and logging_default_format_string instead.
+  --nodebug             The inverse of --debug
+  --nostandard-threads  The inverse of --standard-threads
+  --nouse-syslog        The inverse of --use-syslog
+  --nouse-syslog-rfc-format
+                        The inverse of --use-syslog-rfc-format
+  --noverbose           The inverse of --verbose
+  --pydev-debug-host PYDEV_DEBUG_HOST
+                        Host to connect to for remote debugger.
+  --pydev-debug-port PYDEV_DEBUG_PORT
+                        Port to connect to for remote debugger.
+  --standard-threads    Do not monkey-patch threading system modules.
+  --syslog-log-facility SYSLOG_LOG_FACILITY
+                        Syslog facility to receive log lines.
+  --use-syslog          Use syslog for logging. Existing syslog format is
+                        DEPRECATED during I, and will change in J to honor
+                        RFC5424.
+  --use-syslog-rfc-format
+                        (Optional) Enables or disables syslog rfc5424 format
+                        for logging. If enabled, prefixes the MSG part of the
+                        syslog message with APP-NAME (RFC5424). The format
+                        without the APP-NAME is deprecated in I, and will be
+                        removed in J.
+  --verbose, -v         Print more verbose output (set logging level to INFO
+                        instead of default WARNING level).
+  --version             show program's version number and exit
+
+FILES
+=====
+
+None
+
+SEE ALSO
+========
+
+* `OpenStack Keystone <http://keystone.openstack.org>`__
+
+SOURCE
+======
+
+* Keystone source is managed in GitHub `Keystone <http://github.com/openstack/keystone>`__
+* Keystone bugs are managed at Launchpad `Keystone <https://bugs.launchpad.net/keystone>`__
diff --git a/keystone-moon/doc/source/man/keystone-manage.rst b/keystone-moon/doc/source/man/keystone-manage.rst
new file mode 100644 (file)
index 0000000..b2ea392
--- /dev/null
@@ -0,0 +1,125 @@
+===============
+keystone-manage
+===============
+
+---------------------------
+Keystone Management Utility
+---------------------------
+
+:Author: openstack@lists.openstack.org
+:Date:   2014-10-16
+:Copyright: OpenStack Foundation
+:Version: 2014.2
+:Manual section: 1
+:Manual group: cloud computing
+
+SYNOPSIS
+========
+
+  keystone-manage [options]
+
+DESCRIPTION
+===========
+
+``keystone-manage`` is the command line tool which interacts with the Keystone
+service to initialize and update data within Keystone. Generally,
+``keystone-manage`` is only used for operations that cannot be accomplished
+with the HTTP API, such as data import/export and database migrations.
+
+USAGE
+=====
+
+    ``keystone-manage [options] action [additional args]``
+
+General keystone-manage options:
+--------------------------------
+
+* ``--help`` : display verbose help output.
+
+Invoking ``keystone-manage`` by itself will give you some usage information.
+
+Available commands:
+
+* ``db_sync``: Sync the database.
+* ``db_version``: Print the current migration version of the database.
+* ``mapping_purge``: Purge the identity mapping table.
+* ``pki_setup``: Initialize the certificates used to sign tokens.
+* ``saml_idp_metadata``: Generate identity provider metadata.
+* ``ssl_setup``: Generate certificates for SSL.
+* ``token_flush``: Purge expired tokens.
+
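+For example, to bring the database schema up to date::
+
+    keystone-manage db_sync
+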
+OPTIONS
+=======
+
+  -h, --help            show this help message and exit
+  --config-dir DIR      Path to a config directory to pull \*.conf files from.
+                        This file set is sorted, so as to provide a
+                        predictable parse order if individual options are
+                        over-ridden. The set is parsed after the file(s)
+                        specified via previous --config-file arguments, hence
+                        over-ridden options in the directory take precedence.
+  --config-file PATH    Path to a config file to use. Multiple config files
+                        can be specified, with values in later files taking
+                        precedence. The default files used are: None.
+  --debug, -d           Print debugging output (set logging level to DEBUG
+                        instead of default WARNING level).
+  --log-config-append PATH, --log_config PATH
+                        The name of a logging configuration file. This file is
+                        appended to any existing logging configuration files.
+                        For details about logging configuration files, see the
+                        Python logging module documentation.
+  --log-date-format DATE_FORMAT
+                        Format string for %(asctime)s in log records. Default:
+                        None.
+  --log-dir LOG_DIR, --logdir LOG_DIR
+                        (Optional) The base directory used for relative --log-
+                        file paths.
+  --log-file PATH, --logfile PATH
+                        (Optional) Name of log file to output to. If no
+                        default is set, logging will go to stdout.
+  --log-format FORMAT   DEPRECATED. A logging.Formatter log message format
+                        string which may use any of the available
+                        logging.LogRecord attributes. This option is
+                        deprecated. Please use logging_context_format_string
+                        and logging_default_format_string instead.
+  --nodebug             The inverse of --debug
+  --nostandard-threads  The inverse of --standard-threads
+  --nouse-syslog        The inverse of --use-syslog
+  --nouse-syslog-rfc-format
+                        The inverse of --use-syslog-rfc-format
+  --noverbose           The inverse of --verbose
+  --pydev-debug-host PYDEV_DEBUG_HOST
+                        Host to connect to for remote debugger.
+  --pydev-debug-port PYDEV_DEBUG_PORT
+                        Port to connect to for remote debugger.
+  --standard-threads    Do not monkey-patch threading system modules.
+  --syslog-log-facility SYSLOG_LOG_FACILITY
+                        Syslog facility to receive log lines.
+  --use-syslog          Use syslog for logging. Existing syslog format is
+                        DEPRECATED during I, and will change in J to honor
+                        RFC5424.
+  --use-syslog-rfc-format
+                        (Optional) Enables or disables syslog rfc5424 format
+                        for logging. If enabled, prefixes the MSG part of the
+                        syslog message with APP-NAME (RFC5424). The format
+                        without the APP-NAME is deprecated in I, and will be
+                        removed in J.
+  --verbose, -v         Print more verbose output (set logging level to INFO
+                        instead of default WARNING level).
+  --version             show program's version number and exit
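+
+For example, configuration can be layered from a base file plus a directory of
+override snippets (a sketch; the paths shown are illustrative)::
+
+    keystone-manage --config-file /etc/keystone/keystone.conf \
+                    --config-dir /etc/keystone/keystone.conf.d db_sync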
+
+FILES
+=====
+
+None
+
+SEE ALSO
+========
+
+* `OpenStack Keystone <http://keystone.openstack.org>`__
+
+SOURCE
+======
+
+* Keystone source is hosted on GitHub: `Keystone <http://github.com/openstack/keystone>`__
+* Keystone bugs are tracked on Launchpad: `Keystone <https://bugs.launchpad.net/keystone>`__
diff --git a/keystone-moon/doc/source/middlewarearchitecture.rst b/keystone-moon/doc/source/middlewarearchitecture.rst
new file mode 100644 (file)
index 0000000..7b95851
--- /dev/null
@@ -0,0 +1,34 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=======================
+Middleware Architecture
+=======================
+
+Abstract
+========
+
+The Keystone middleware architecture supports a common authentication protocol
+used across the OpenStack projects. By using Keystone as a common
+authentication and authorization mechanism, OpenStack projects can plug into
+the authentication and authorization systems already in use in their
+environments.
+
+The auth_token middleware is no longer hosted in Keystone and has moved to the
+keystonemiddleware project. The `documentation regarding authentication
+middleware`_ can be found there.
+
+.. _`documentation regarding authentication middleware`: http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html
diff --git a/keystone-moon/doc/source/setup.rst b/keystone-moon/doc/source/setup.rst
new file mode 100644 (file)
index 0000000..f919dcc
--- /dev/null
@@ -0,0 +1,175 @@
+..
+      Copyright 2011-2012 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=============================================
+Setting up a Keystone development environment
+=============================================
+
+This document describes getting the source from keystone's `GitHub repository`_
+for development purposes.
+
+To install Keystone from packaging, refer instead to Keystone's `User
+Documentation`_.
+
+.. _`GitHub Repository`: http://github.com/openstack/keystone
+.. _`User Documentation`: http://docs.openstack.org/
+
+Prerequisites
+=============
+
+This document assumes you are using:
+
+- Ubuntu, Fedora or openSUSE (SLE)
+- `Python 2.7`_
+
+.. _`Python 2.7`: http://www.python.org/
+
+And that you have the following tools available on your system:
+
+- git_
+- setuptools_
+- pip_
+- msgfmt (part of the gettext package)
+- virtualenv_
+
+**Reminder**: If you're successfully using a different platform, or a
+different version of the above, please document your configuration here!
+
+.. _git: http://git-scm.com/
+.. _setuptools: http://pypi.python.org/pypi/setuptools
+
+Getting the latest code
+=======================
+
+Make a clone of the code from our `GitHub repository`_:
+
+.. code-block:: bash
+
+    $ git clone https://github.com/openstack/keystone.git
+
+When that is complete, you can:
+
+.. code-block:: bash
+
+    $ cd keystone
+
+Installing dependencies
+=======================
+
+Keystone maintains two lists of dependencies::
+
+    requirements.txt
+    test-requirements.txt
+
+The first is the list of dependencies needed for running Keystone; the second
+includes dependencies used for active development and testing of Keystone
+itself.
+
+These dependencies can be installed from PyPI_ using the Python tool pip_.
+
+.. _PyPI: http://pypi.python.org/
+.. _pip: http://pypi.python.org/pypi/pip
+
+However, your system *may* need additional dependencies that `pip` (and by
+extension, PyPI) cannot satisfy. These dependencies should be installed
+prior to using `pip`, and the installation method may vary depending on
+your platform.
+
+Ubuntu 12.04:
+
+.. code-block:: bash
+
+    $ sudo apt-get install python-dev libxml2-dev libxslt1-dev libsasl2-dev libsqlite3-dev libssl-dev libldap2-dev libffi-dev
+
+
+Fedora 19+:
+
+.. code-block:: bash
+
+    $ sudo yum install python-sqlite2 python-lxml python-greenlet-devel python-ldap sqlite-devel openldap-devel python-devel libxslt-devel openssl-devel
+
+openSUSE 13.2 (SLE 12):
+
+.. code-block:: bash
+
+    $ sudo zypper install libxslt-devel openldap2-devel libopenssl-devel python-devel python-greenlet-devel python-ldap python-lxml python-pysqlite sqlite3-devel
+
+PyPI Packages and VirtualEnv
+----------------------------
+
+We recommend establishing a virtualenv to run Keystone within. virtualenv
+limits the Python environment to just what you're installing as dependencies,
+which helps keep a clean environment for working on Keystone. The tools
+directory in Keystone has a script that makes this very simple:
+
+.. code-block:: bash
+
+    $ python tools/install_venv.py
+
+This will create a local virtual environment in the directory ``.venv``.
+Once created, you can activate this virtualenv for your current shell using:
+
+.. code-block:: bash
+
+    $ source .venv/bin/activate
+
+The virtual environment can be disabled using the command:
+
+.. code-block:: bash
+
+    $ deactivate
+
+You can also use ``tools/with_venv.sh`` to prefix commands so that they run
+within the virtual environment. For more information on virtual environments,
+see virtualenv_.
+
+.. _virtualenv: http://www.virtualenv.org/
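+
+For example, any command can be wrapped to run inside the virtualenv (a
+sketch, assuming the virtualenv was created as above):
+
+.. code-block:: bash
+
+    $ tools/with_venv.sh python -c "import keystone"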
+
+If you want to run Keystone outside of a virtualenv, you can install the
+dependencies directly into your system from the requirements files:
+
+.. code-block:: bash
+
+    # Install the dependencies for running keystone
+    $ pip install -r requirements.txt
+
+    # Install the dependencies for developing, testing, and running keystone
+    $ pip install -r test-requirements.txt
+
+    # Use 'python setup.py' to link Keystone into Python's site-packages
+    $ python setup.py develop
+
+
+Verifying Keystone is set up
+============================
+
+Once set up, either directly or within a virtualenv, you should be able to
+invoke Python and import the libraries. If you're using a virtualenv, don't
+forget to activate it:
+
+.. code-block:: bash
+
+    $ source .venv/bin/activate
+    $ python
+
+You should then be able to ``import keystone`` from your Python shell
+without issue:
+
+.. code-block:: python
+
+    >>> import keystone
+    >>>
+
+If you can import Keystone successfully, you should be ready to move on to
+:doc:`developing`.
diff --git a/keystone-moon/etc/default_catalog.templates b/keystone-moon/etc/default_catalog.templates
new file mode 100644 (file)
index 0000000..a69b7f0
--- /dev/null
@@ -0,0 +1,27 @@
+# config for templated.Catalog, using camelCase because I don't want to do
+# translations for keystone compat
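+#
+# Note: $(public_port)s and $(admin_port)s are substituted from keystone.conf
+# at runtime, and $(tenant_id)s is filled in from the scope of the token being
+# issued by the templated catalog backend.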
+catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
+catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
+catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
+catalog.RegionOne.identity.name = Identity Service
+
+# fake compute service for now to help novaclient tests work
+catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s
+catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s
+catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s
+catalog.RegionOne.compute.name = Compute Service
+
+catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s
+catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s
+catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s
+catalog.RegionOne.volume.name = Volume Service
+
+catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud
+catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin
+catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud
+catalog.RegionOne.ec2.name = EC2 Service
+
+catalog.RegionOne.image.publicURL = http://localhost:9292/v1
+catalog.RegionOne.image.adminURL = http://localhost:9292/v1
+catalog.RegionOne.image.internalURL = http://localhost:9292/v1
+catalog.RegionOne.image.name = Image Service
diff --git a/keystone-moon/etc/keystone-paste.ini b/keystone-moon/etc/keystone-paste.ini
new file mode 100644 (file)
index 0000000..24f167f
--- /dev/null
@@ -0,0 +1,109 @@
+# Keystone PasteDeploy configuration file.
+
+[filter:moon]
+paste.filter_factory = keystone.contrib.moon.routers:Admin.factory
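+# Note: this filter is only active once added to a pipeline below, e.g. by
+# inserting "moon" into pipeline:api_v3 before service_v3 (a sketch; the
+# exact position depends on your deployment).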
+
+[filter:debug]
+paste.filter_factory = keystone.common.wsgi:Debug.factory
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:build_auth_context]
+paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory
+
+[filter:token_auth]
+paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
+
+[filter:admin_token_auth]
+paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
+
+[filter:json_body]
+paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
+
+[filter:user_crud_extension]
+paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
+
+[filter:crud_extension]
+paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
+
+[filter:ec2_extension]
+paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
+
+[filter:ec2_extension_v3]
+paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory
+
+[filter:federation_extension]
+paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory
+
+[filter:oauth1_extension]
+paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory
+
+[filter:s3_extension]
+paste.filter_factory = keystone.contrib.s3:S3Extension.factory
+
+[filter:endpoint_filter_extension]
+paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
+
+[filter:endpoint_policy_extension]
+paste.filter_factory = keystone.contrib.endpoint_policy.routers:EndpointPolicyExtension.factory
+
+[filter:simple_cert_extension]
+paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory
+
+[filter:revoke_extension]
+paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory
+
+[filter:url_normalize]
+paste.filter_factory = keystone.middleware:NormalizingFilter.factory
+
+[filter:sizelimit]
+paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:public_service]
+paste.app_factory = keystone.service:public_app_factory
+
+[app:service_v3]
+paste.app_factory = keystone.service:v3_app_factory
+
+[app:admin_service]
+paste.app_factory = keystone.service:admin_app_factory
+
+[pipeline:public_api]
+# The last item in this pipeline must be public_service or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension user_crud_extension public_service
+
+[pipeline:admin_api]
+# The last item in this pipeline must be admin_service or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension s3_extension crud_extension admin_service
+
+[pipeline:api_v3]
+# The last item in this pipeline must be service_v3 or an equivalent
+# application. It cannot be a filter.
+pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension federation_extension oauth1_extension endpoint_filter_extension endpoint_policy_extension service_v3
+
+[app:public_version_service]
+paste.app_factory = keystone.service:public_version_app_factory
+
+[app:admin_version_service]
+paste.app_factory = keystone.service:admin_version_app_factory
+
+[pipeline:public_version_api]
+pipeline = sizelimit url_normalize public_version_service
+
+[pipeline:admin_version_api]
+pipeline = sizelimit url_normalize admin_version_service
+
+[composite:main]
+use = egg:Paste#urlmap
+/v2.0 = public_api
+/v3 = api_v3
+/ = public_version_api
+
+[composite:admin]
+use = egg:Paste#urlmap
+/v2.0 = admin_api
+/v3 = api_v3
+/ = admin_version_api
diff --git a/keystone-moon/etc/keystone.conf.sample b/keystone-moon/etc/keystone.conf.sample
new file mode 100644 (file)
index 0000000..b3c741c
--- /dev/null
@@ -0,0 +1,1716 @@
+[DEFAULT]
+
+#
+# From keystone
+#
+
+# A "shared secret" that can be used to bootstrap Keystone. This "token" does
+# not represent a user, and carries no explicit authorization. To disable in
+# production (highly recommended), remove AdminTokenAuthMiddleware from your
+# paste application pipelines (for example, in keystone-paste.ini). (string
+# value)
+#admin_token = ADMIN
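+
+# For example, disabling the admin token (a sketch) means editing
+# keystone-paste.ini so that "admin_token_auth" no longer appears in the
+# public_api, admin_api, or api_v3 pipeline lines.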
+
+# (Deprecated) The port which the OpenStack Compute service listens on. This
+# option was only used for string replacement in the templated catalog backend.
+# Templated catalogs should replace the "$(compute_port)s" substitution with
+# the static port of the compute service. As of Juno, this option is deprecated
+# and will be removed in the L release. (integer value)
+#compute_port = 8774
+
+# The base public endpoint URL for Keystone that is advertised to clients
+# (NOTE: this does NOT affect how Keystone listens for connections). Defaults
+# to the base host URL of the request. E.g. a request to
+# http://server:5000/v3/users will default to http://server:5000. You should
+# only need to set this value if the base URL contains a path (e.g. /prefix/v3)
+# or the endpoint should be found on a different server. (string value)
+#public_endpoint = <None>
+
+# The base admin endpoint URL for Keystone that is advertised to clients (NOTE:
+# this does NOT affect how Keystone listens for connections). Defaults to the
+# base host URL of the request. E.g. a request to http://server:35357/v3/users
+# will default to http://server:35357. You should only need to set this value
+# if the base URL contains a path (e.g. /prefix/v3) or the endpoint should be
+# found on a different server. (string value)
+#admin_endpoint = <None>
+
+# Maximum depth of the project hierarchy. WARNING: setting it to a large value
+# may adversely impact performance. (integer value)
+#max_project_tree_depth = 5
+
+# Limit the sizes of user & project ID/names. (integer value)
+#max_param_size = 64
+
+# Similar to max_param_size, but provides an exception for token values.
+# (integer value)
+#max_token_size = 8192
+
+# Similar to the member_role_name option, this represents the default role ID
+# used to associate users with their default projects in the v2 API. This will
+# be used as the explicit role where one is not specified by the v2 API.
+# (string value)
+#member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
+
+# This is the role name used in combination with the member_role_id option; see
+# that option for more detail. (string value)
+#member_role_name = _member_
+
+# The value passed as the keyword "rounds" to passlib's encrypt method.
+# (integer value)
+#crypt_strength = 40000
+
+# The maximum number of entities that will be returned in a collection, with no
+# limit set by default. This global limit may be then overridden for a specific
+# driver, by specifying a list_limit in the appropriate section (e.g.
+# [assignment]). (integer value)
+#list_limit = <None>
+
+# Set this to false if you want to enable the ability for user, group and
+# project entities to be moved between domains by updating their domain_id.
+# Allowing such movement is not recommended if the scope of a domain admin is
+# being restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). (boolean value)
+#domain_id_immutable = true
+
+# If set to true, strict password length checking is performed for password
+# manipulation. If a password exceeds the maximum length, the operation will
+# fail with an HTTP 403 Forbidden error. If set to false, passwords are
+# automatically truncated to the maximum length. (boolean value)
+#strict_password_check = false
+
+# The HTTP header used to determine the scheme for the original request, even
+# if it was removed by an SSL terminating proxy. Typical value is
+# "HTTP_X_FORWARDED_PROTO". (string value)
+#secure_proxy_ssl_header = <None>
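+
+# For example, behind an SSL-terminating proxy (taken from the typical value
+# noted above):
+#secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO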
+
+#
+# From keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string value)
+#default_publisher_id = <None>
+
+# Define the notification format for Identity Service events. A "basic"
+# notification has information about the resource being operated on. A "cadf"
+# notification has the same information, as well as information about the
+# initiator of the event. Valid options are: basic and cadf (string value)
+#notification_format = basic
+
+#
+# From keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port number;
+# <port> results in listening on the specified port number (and not enabling
+# backdoor if that port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range of port numbers.
+# The chosen port is displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+#
+# From keystone.openstack.common.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched. (multi valued)
+#policy_dirs = policy.d
+
+#
+# From oslo.log
+#
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING
+# level). (boolean value)
+#debug = false
+
+# Print more verbose output (set logging level to INFO instead of default
+# WARNING level). (boolean value)
+#verbose = false
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# DEPRECATED. A logging.Formatter log message format string which may use any
+# of the available logging.LogRecord attributes. This option is deprecated.
+# Please use logging_context_format_string and logging_default_format_string
+# instead. (string value)
+#log_format = <None>
+
+# Format string for %%(asctime)s in log records. Default: %(default)s. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is set, logging will
+# go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative --log-file paths. (string
+# value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during I, and
+# will change in J to honor RFC5424. (boolean value)
+#use_syslog = false
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled,
+# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The
+# format without the APP-NAME is deprecated in I, and will be removed in J.
+# (boolean value)
+#use_syslog_rfc_format = false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context. (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+#
+# From oslo.messaging
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port = 9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# (integer value)
+#rpc_cast_timeout = 30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq = 300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl = 600
+
+# Size of RPC thread pool. (integer value)
+#rpc_thread_pool_size = 64
+
+# Driver or drivers to handle sending notifications. (multi valued)
+#notification_driver =
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics = notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers include qpid
+# and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = keystone
+
+
+[assignment]
+
+#
+# From keystone
+#
+
+# Assignment backend driver. (string value)
+#driver = <None>
+
+
+[auth]
+
+#
+# From keystone
+#
+
+# Default auth methods. (list value)
+#methods = external,password,token
+
+# The password auth plugin module. (string value)
+#password = keystone.auth.plugins.password.Password
+
+# The token auth plugin module. (string value)
+#token = keystone.auth.plugins.token.Token
+
+# The external (REMOTE_USER) auth plugin module. (string value)
+#external = keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# From keystone
+#
+
+# Prefix for building the configuration dictionary for the cache region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name. (string value)
+#config_prefix = cache.keystone
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache region.
+# This applies to any cached method that doesn't have an explicit cache
+# expiration time defined for it. (integer value)
+#expiration_time = 600
+
+# Dogpile.cache backend module. It is recommended that Memcache with pooling
+# (keystone.cache.memcache_pool) or Redis (dogpile.cache.redis) be used in
+# production deployments.  Small workloads (single process) like devstack can
+# use the dogpile.cache.memory backend. (string value)
+#backend = keystone.common.cache.noop
+
+# Arguments supplied to the backend module. Specify this option once per
+# argument to be passed to the dogpile.cache backend. Example format:
+# "<argname>:<value>". (multi valued)
+#backend_argument =
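+
+# For example, a sketch pointing a memcache-based backend at a server (the
+# "url" argument name applies to the memcache backends):
+#backend_argument = url:localhost:11211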
+
+# Proxy classes to import that will affect the way the dogpile.cache backend
+# functions. See the dogpile.cache documentation on changing-backend-behavior.
+# (list value)
+#proxies =
+
+# Global toggle for all caching using the should_cache_fn mechanism. (boolean
+# value)
+#enabled = false
+
+# Extra debugging from the cache backend (cache keys, get/set/delete/etc
+# calls). This is only really useful if you need to see the specific cache-
+# backend get/set/delete calls with the keys/values.  Typically this should be
+# left set to false. (boolean value)
+#debug_cache_backend = false
+
+# Memcache servers in the format of "host:port". (dogpile.cache.memcache and
+# keystone.cache.memcache_pool backends only). (list value)
+#memcache_servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. (dogpile.cache.memcache and keystone.cache.memcache_pool backends
+# only). (integer value)
+#memcache_dead_retry = 300
+
+# Timeout in seconds for every call to a server. (dogpile.cache.memcache and
+# keystone.cache.memcache_pool backends only). (integer value)
+#memcache_socket_timeout = 3
+
+# Max total number of open connections to every memcached server.
+# (keystone.cache.memcache_pool backend only). (integer value)
+#memcache_pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. (keystone.cache.memcache_pool backend only). (integer value)
+#memcache_pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. (integer value)
+#memcache_pool_connection_get_timeout = 10
+
+
+[catalog]
+
+#
+# From keystone
+#
+
+# Catalog template file name for use with the template catalog backend. (string
+# value)
+#template_file = default_catalog.templates
+
+# Catalog backend driver. (string value)
+#driver = keystone.catalog.backends.sql.Catalog
+
+# Toggle for catalog caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache catalog data (in seconds). This has no effect unless global and
+# catalog caching are enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a catalog collection.
+# (integer value)
+#list_limit = <None>
+
+
+[credential]
+
+#
+# From keystone
+#
+
+# Credential backend driver. (string value)
+#driver = keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
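+
+# For example (illustrative; substitute your own credentials and host):
+#connection = mysql://keystone:KEYSTONE_DBPASS@localhost/keystone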
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[endpoint_filter]
+
+#
+# From keystone
+#
+
+# Endpoint Filter backend driver (string value)
+#driver = keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists. (boolean value)
+#return_all_endpoints_if_no_filter = true
+
+
+[endpoint_policy]
+
+#
+# From keystone
+#
+
+# Endpoint policy backend driver (string value)
+#driver = keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy
+
+
+[eventlet_server]
+
+#
+# From keystone
+#
+
+# The number of worker processes to serve the public eventlet application.
+# Defaults to number of CPUs (minimum of 2). (integer value)
+# Deprecated group/name - [DEFAULT]/public_workers
+#public_workers = <None>
+
+# The number of worker processes to serve the admin eventlet application.
+# Defaults to number of CPUs (minimum of 2). (integer value)
+# Deprecated group/name - [DEFAULT]/admin_workers
+#admin_workers = <None>
+
+# The IP address of the network interface for the public service to listen on.
+# (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+# Deprecated group/name - [DEFAULT]/public_bind_host
+#public_bind_host = 0.0.0.0
+
+# The port number which the public service listens on. (integer value)
+# Deprecated group/name - [DEFAULT]/public_port
+#public_port = 5000
+
+# The IP address of the network interface for the admin service to listen on.
+# (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+# Deprecated group/name - [DEFAULT]/admin_bind_host
+#admin_bind_host = 0.0.0.0
+
+# The port number which the admin service listens on. (integer value)
+# Deprecated group/name - [DEFAULT]/admin_port
+#admin_port = 35357
+
+# Set this to true if you want to enable TCP_KEEPALIVE on server sockets, i.e.
+# sockets used by the Keystone wsgi server for client connections. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/tcp_keepalive
+#tcp_keepalive = false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only
+# applies if tcp_keepalive is true. (integer value)
+# Deprecated group/name - [DEFAULT]/tcp_keepidle
+#tcp_keepidle = 600
+
+
+[eventlet_server_ssl]
+
+#
+# From keystone
+#
+
+# Toggle for SSL support on the Keystone eventlet servers. (boolean value)
+# Deprecated group/name - [ssl]/enable
+#enable = false
+
+# Path of the certfile for SSL. For non-production environments, you may be
+# interested in using `keystone-manage ssl_setup` to generate self-signed
+# certificates. (string value)
+# Deprecated group/name - [ssl]/certfile
+#certfile = /etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+# Deprecated group/name - [ssl]/keyfile
+#keyfile = /etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the CA cert file for SSL. (string value)
+# Deprecated group/name - [ssl]/ca_certs
+#ca_certs = /etc/keystone/ssl/certs/ca.pem
+
+# Require client certificate. (boolean value)
+# Deprecated group/name - [ssl]/cert_required
+#cert_required = false
+
+
+[federation]
+
+#
+# From keystone
+#
+
+# Federation backend driver. (string value)
+#driver = keystone.contrib.federation.backends.sql.Federation
+
+# Value to be used when filtering assertion parameters from the environment.
+# (string value)
+#assertion_prefix =
+
+# Value to be used to obtain the entity ID of the Identity Provider from the
+# environment (e.g. if using the mod_shib plugin this value is `Shib-Identity-
+# Provider`). (string value)
+#remote_id_attribute = <None>
+
+# A domain name that is reserved to allow federated ephemeral users to have a
+# domain concept. Note that an admin will not be able to create a domain with
+# this name or update an existing domain to this name. You are not advised to
+# change this value unless you really have to. Changing this option to an
+# empty string or None has no effect; the default name will be used.
+# (string value)
+#federated_domain_name = Federated
+
+# A list of trusted dashboard hosts. Before accepting a Single Sign-On request
+# to return a token, the origin host must be a member of the trusted_dashboard
+# list. This configuration option may be repeated for multiple values. For
+# example: trusted_dashboard=http://acme.com trusted_dashboard=http://beta.com
+# (multi valued)
+#trusted_dashboard =
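+
+# For example (repeating the option once per trusted host, as noted above):
+#trusted_dashboard = http://acme.com
+#trusted_dashboard = http://beta.com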
+
+# Location of Single Sign-On callback handler, will return a token to a trusted
+# dashboard host. (string value)
+#sso_callback_template = /etc/keystone/sso_callback_template.html
+
+
+[fernet_tokens]
+
+#
+# From keystone
+#
+
+# Directory containing Fernet token keys. (string value)
+#key_repository = /etc/keystone/fernet-keys/
+
+# This controls how many keys are held in rotation by keystone-manage
+# fernet_rotate before they are discarded. The default value of 3 means that
+# keystone will maintain one staged key, one primary key, and one secondary
+# key. Increasing this value means that additional secondary keys will be kept
+# in the rotation. (integer value)
+#max_active_keys = 3
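+
+# As a sketch, the repository above is created with `keystone-manage
+# fernet_setup` and keys are then rotated periodically with `keystone-manage
+# fernet_rotate`.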
+
+
+[identity]
+
+#
+# From keystone
+#
+
+# This references the domain to use for all Identity API v2 requests (which are
+# not aware of domains). A domain with this ID will be created for you by
+# keystone-manage db_sync in migration 008. The domain referenced by this ID
+# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
+# There is nothing special about this domain, other than the fact that it must
+# exist in order to maintain support for your v2 clients. (string value)
+#default_domain_id = default
+
+# A subset (or all) of domains can have their own identity driver, each with
+# their own partial configuration options, stored in either the resource
+# backend or in a file in a domain configuration directory (depending on the
+# setting of domain_configurations_from_database). Only values specific to the
+# domain need to be specified in this manner. This feature is disabled by
+# default; set to true to enable. (boolean value)
+#domain_specific_drivers_enabled = false
+
+# Extract the domain specific configuration options from the resource backend
+# where they have been stored with the domain data. This feature is disabled by
+# default (in which case the domain specific options will be loaded from files
+# in the domain configuration directory); set to true to enable. This feature
+# is not yet supported. (boolean value)
+#domain_configurations_from_database = false
+
+# Path for Keystone to locate the domain specific identity configuration files
+# if domain_specific_drivers_enabled is set to true. (string value)
+#domain_config_dir = /etc/keystone/domains
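+
+# As a sketch, files in this directory are conventionally named
+# keystone.<domain_name>.conf, one file per domain.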
+
+# Identity backend driver. (string value)
+#driver = keystone.identity.backends.sql.Identity
+
+# Toggle for identity caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache identity data (in seconds). This has no effect unless global
+# and identity caching are enabled. (integer value)
+#cache_time = 600
+
+# Maximum supported length for user passwords; decrease to improve performance.
+# (integer value)
+#max_password_length = 4096
+
+# Maximum number of entities that will be returned in an identity collection.
+# (integer value)
+#list_limit = <None>
+
+
+[identity_mapping]
+
+#
+# From keystone
+#
+
+# Keystone Identity Mapping backend driver. (string value)
+#driver = keystone.identity.mapping_backends.sql.Mapping
+
+# Public ID generator for user and group entities. The Keystone identity mapper
+# only supports generators that produce no more than 64 characters. (string
+# value)
+#generator = keystone.identity.id_generators.sha256.Generator
+
+# The format of user and group IDs changed in Juno for backends that do not
+# generate UUIDs (e.g. LDAP), with keystone providing a hash mapping to the
+# underlying attribute in LDAP. By default this mapping is disabled, which
+# ensures that existing IDs will not change. Even when the mapping is enabled
+# by using domain specific drivers, any users and groups from the default
+# domain being handled by LDAP will still not be mapped to ensure their IDs
+# remain backward compatible. Setting this value to False will enable the
+# mapping even for the default LDAP driver. It is only safe to do this if you
+# do not already have assignments for users and groups from the default LDAP
+# domain, and it is acceptable for Keystone to provide different IDs to
+# clients than it did previously.  Typically this means that the only time you
+# can set this value to False is when configuring a fresh installation.
+# (boolean value)
+#backward_compatible_ids = true
+
+
+[kvs]
+
+#
+# From keystone
+#
+
+# Extra dogpile.cache backend modules to register with the dogpile.cache
+# library. (list value)
+#backends =
+
+# Prefix for building the configuration dictionary for the KVS region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name. (string value)
+#config_prefix = keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure fixed length keys.
+# This is toggle-able for debugging purposes, it is highly recommended to
+# always leave this set to true. (boolean value)
+#enable_key_mangler = true
+
+# Default lock timeout for distributed locking. (integer value)
+#default_lock_timeout = 5
+
+
+[ldap]
+
+#
+# From keystone
+#
+
+# URL for connecting to the LDAP server. (string value)
+#url = ldap://localhost
+
+# User BindDN to query the LDAP server. (string value)
+#user = <None>
+
+# Password for the BindDN to query the LDAP server. (string value)
+#password = <None>
+
+# LDAP server suffix (string value)
+#suffix = cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required if the
+# objectclass for groups requires the "member" attribute. (boolean value)
+#use_dumb_member = false
+
+# DN of the "dummy member" to use when "use_dumb_member" is enabled. (string
+# value)
+#dumb_member = cn=dumb,dc=nonexistent
+
+# Delete subtrees using the subtree delete control. Only enable this option if
+# your LDAP server supports subtree deletion. (boolean value)
+#allow_subtree_delete = false
+
+# The LDAP scope for queries, this can be either "one" (onelevel/singleLevel)
+# or "sub" (subtree/wholeSubtree). (string value)
+#query_scope = one
+
+# Maximum results per page; a value of zero ("0") disables paging. (integer
+# value)
+#page_size = 0
+
+# The LDAP dereferencing option for queries. This can be either "never",
+# "searching", "always", "finding" or "default". The "default" option falls
+# back to using default dereferencing configured by your ldap.conf. (string
+# value)
+#alias_dereferencing = default
+
+# Sets the LDAP debugging level for LDAP calls. A value of 0 means that
+# debugging is not enabled. This value is a bitmask, consult your LDAP
+# documentation for possible values. (integer value)
+#debug_level = <None>
+
+# Override the system's default referral chasing behavior for queries. (boolean
+# value)
+#chase_referrals = <None>
+
+# Search base for users. (string value)
+#user_tree_dn = <None>
+
+# LDAP search filter for users. (string value)
+#user_filter = <None>
+
+# LDAP objectclass for users. (string value)
+#user_objectclass = inetOrgPerson
+
+# LDAP attribute mapped to user id. WARNING: must not be a multivalued
+# attribute. (string value)
+#user_id_attribute = cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute = sn
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute = mail
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute = userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute = enabled
+
+# Invert the meaning of the boolean enabled values. Some LDAP servers use a
+# boolean lock attribute where "true" means an account is disabled. Setting
+# "user_enabled_invert = true" will allow these lock attributes to be used.
+# This setting will have no effect if "user_enabled_mask" or
+# "user_enabled_emulation" settings are in use. (boolean value)
+#user_enabled_invert = false
+
+# Bitmask integer to indicate the bit that the enabled value is stored in if
+# the LDAP server represents "enabled" as a bit on an integer rather than a
+# boolean. A value of "0" indicates the mask is not used. If this is not set to
+# "0" the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer value)
+#user_enabled_mask = 0
+
+# Default value to enable users. This should match an appropriate int value if
+# the LDAP server uses non-boolean (bitmask) values to indicate if a user is
+# enabled or disabled. If this is not set to "True" the typical value is "512".
+# This is typically used when "user_enabled_attribute = userAccountControl".
+# (string value)
+#user_enabled_default = True
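+
+# For example, against an Active Directory style server (a sketch assembled
+# from the typical values noted above):
+#user_enabled_attribute = userAccountControl
+#user_enabled_mask = 2
+#user_enabled_default = 512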
+
+# List of attributes stripped off the user on update. (list value)
+#user_attribute_ignore = default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users. (string value)
+#user_default_project_id_attribute = <None>
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create = true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update = true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete = true
+
+# If true, Keystone uses an alternative method to determine if a user is
+# enabled or not by checking if they are a member of the
+# "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation = false
+
+# DN of the group entry to hold enabled users when using enabled emulation.
+# (string value)
+#user_enabled_emulation_dn = <None>
+
+# List of additional LDAP attributes used for mapping additional attribute
+# mappings for users. Attribute mapping format is <ldap_attr>:<user_attr>,
+# where ldap_attr is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+#user_additional_attribute_mapping =
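+
+# For example (illustrative; exposes the LDAP description attribute as the
+# Identity API description attribute):
+#user_additional_attribute_mapping = description:description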
+
+# Search base for projects (string value)
+# Deprecated group/name - [ldap]/tenant_tree_dn
+#project_tree_dn = <None>
+
+# LDAP search filter for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_filter
+#project_filter = <None>
+
+# LDAP objectclass for projects. (string value)
+# Deprecated group/name - [ldap]/tenant_objectclass
+#project_objectclass = groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+# Deprecated group/name - [ldap]/tenant_id_attribute
+#project_id_attribute = cn
+
+# LDAP attribute mapped to project membership for user. (string value)
+# Deprecated group/name - [ldap]/tenant_member_attribute
+#project_member_attribute = member
+
+# LDAP attribute mapped to project name. (string value)
+# Deprecated group/name - [ldap]/tenant_name_attribute
+#project_name_attribute = ou
+
+# LDAP attribute mapped to project description. (string value)
+# Deprecated group/name - [ldap]/tenant_desc_attribute
+#project_desc_attribute = description
+
+# LDAP attribute mapped to project enabled. (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_attribute
+#project_enabled_attribute = enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+# Deprecated group/name - [ldap]/tenant_domain_id_attribute
+#project_domain_id_attribute = businessCategory
+
+# List of attributes stripped off the project on update. (list value)
+# Deprecated group/name - [ldap]/tenant_attribute_ignore
+#project_attribute_ignore =
+
+# Allow project creation in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_create
+#project_allow_create = true
+
+# Allow project update in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_update
+#project_allow_update = true
+
+# Allow project deletion in LDAP backend. (boolean value)
+# Deprecated group/name - [ldap]/tenant_allow_delete
+#project_allow_delete = true
+
+# If true, Keystone uses an alternative method to determine if a project is
+# enabled or not by checking if it is a member of the
+# "project_enabled_emulation_dn" group. (boolean value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation
+#project_enabled_emulation = false
+
+# DN of the group entry to hold enabled projects when using enabled emulation.
+# (string value)
+# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn
+#project_enabled_emulation_dn = <None>
+
+# Additional attribute mappings for projects. Attribute mapping format is
+# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
+# and user_attr is the Identity API attribute. (list value)
+# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping
+#project_additional_attribute_mapping =
+
+# Search base for roles. (string value)
+#role_tree_dn = <None>
+
+# LDAP search filter for roles. (string value)
+#role_filter = <None>
+
+# LDAP objectclass for roles. (string value)
+#role_objectclass = organizationalRole
+
+# LDAP attribute mapped to role id. (string value)
+#role_id_attribute = cn
+
+# LDAP attribute mapped to role name. (string value)
+#role_name_attribute = ou
+
+# LDAP attribute mapped to role membership. (string value)
+#role_member_attribute = roleOccupant
+
+# List of attributes stripped off the role on update. (list value)
+#role_attribute_ignore =
+
+# Allow role creation in LDAP backend. (boolean value)
+#role_allow_create = true
+
+# Allow role update in LDAP backend. (boolean value)
+#role_allow_update = true
+
+# Allow role deletion in LDAP backend. (boolean value)
+#role_allow_delete = true
+
+# Additional attribute mappings for roles. Attribute mapping format is
+# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
+# and user_attr is the Identity API attribute. (list value)
+#role_additional_attribute_mapping =
+
+# Search base for groups. (string value)
+#group_tree_dn = <None>
+
+# LDAP search filter for groups. (string value)
+#group_filter = <None>
+
+# LDAP objectclass for groups. (string value)
+#group_objectclass = groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute = cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute = ou
+
+# LDAP attribute mapped to show group membership. (string value)
+#group_member_attribute = member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute = description
+
+# List of attributes stripped off the group on update. (list value)
+#group_attribute_ignore =
+
+# Allow group creation in LDAP backend. (boolean value)
+#group_allow_create = true
+
+# Allow group update in LDAP backend. (boolean value)
+#group_allow_update = true
+
+# Allow group deletion in LDAP backend. (boolean value)
+#group_allow_delete = true
+
+# Additional attribute mappings for groups. Attribute mapping format is
+# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
+# and user_attr is the Identity API attribute. (list value)
+#group_additional_attribute_mapping =
+
+# CA certificate file path for communicating with LDAP servers. (string value)
+#tls_cacertfile = <None>
+
+# CA certificate directory path for communicating with LDAP servers. (string
+# value)
+#tls_cacertdir = <None>
+
+# Enable TLS for communicating with LDAP servers. (boolean value)
+#use_tls = false
+
+# Valid options for tls_req_cert are demand, never, and allow. (string value)
+#tls_req_cert = demand
+
+# Enable LDAP connection pooling. (boolean value)
+#use_pool = false
+
+# Connection pool size. (integer value)
+#pool_size = 10
+
+# Maximum count of reconnect trials. (integer value)
+#pool_retry_max = 3
+
+# Time span in seconds to wait between two reconnect trials. (floating point
+# value)
+#pool_retry_delay = 0.1
+
+# Connector timeout in seconds. Value -1 indicates indefinite wait for
+# response. (integer value)
+#pool_connection_timeout = -1
+
+# Connection lifetime in seconds. (integer value)
+#pool_connection_lifetime = 600
+
+# Enable LDAP connection pooling for end user authentication. If use_pool is
+# disabled, then this setting is meaningless and is not used at all. (boolean
+# value)
+#use_auth_pool = false
+
+# End user auth connection pool size. (integer value)
+#auth_pool_size = 100
+
+# End user auth connection lifetime in seconds. (integer value)
+#auth_pool_connection_lifetime = 60
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password = <None>
+
+
+[matchmaker_ring]
+
+#
+# From oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile = /etc/oslo/matchmaker_ring.json
+
+
+[memcache]
+
+#
+# From keystone
+#
+
+# Memcache servers in the format of "host:port". (list value)
+#servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#dead_retry = 300
+
+# Timeout in seconds for every call to a server. This is used by the key value
+# store system (e.g. token pooled memcached persistence backend). (integer
+# value)
+#socket_timeout = 3
+
+# Max total number of open connections to every memcached server. This is used
+# by the key value store system (e.g. token pooled memcached persistence
+# backend). (integer value)
+#pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. This is used by the key value store system (e.g. token pooled
+# memcached persistence backend). (integer value)
+#pool_connection_get_timeout = 10
+
+
+[oauth1]
+
+#
+# From keystone
+#
+
+# Credential backend driver. (string value)
+#driver = keystone.contrib.oauth1.backends.sql.OAuth1
+
+# Duration (in seconds) for the OAuth Request Token. (integer value)
+#request_token_duration = 28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer value)
+#access_token_duration = 86400
+
+
+[os_inherit]
+
+#
+# From keystone
+#
+
+# Role-assignment inheritance to projects from the owning domain or from
+# projects higher in the hierarchy can be optionally enabled. (boolean value)
+#enabled = false
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# Address prefix used when sending to a specific server. (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# Address prefix used when broadcasting to all servers. (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# Address prefix used when sending to any server in a group. (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container. (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds). (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout. (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file for verifying the server certificate. (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients. (string value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign the cert_file certificate. (string value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted). (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP. (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+
+[oslo_messaging_qpid]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# Qpid broker hostname. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_hostname
+#qpid_hostname = localhost
+
+# Qpid broker port. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_port
+#qpid_port = 5672
+
+# Qpid HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/qpid_hosts
+#qpid_hosts = $qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_username
+#qpid_username =
+
+# Password for Qpid connection. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_password
+#qpid_password =
+
+# Space separated list of SASL mechanisms to use for auth. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
+#qpid_sasl_mechanisms =
+
+# Seconds between connection keepalive heartbeats. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_heartbeat
+#qpid_heartbeat = 60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+# Deprecated group/name - [DEFAULT]/qpid_protocol
+#qpid_protocol = tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
+#qpid_tcp_nodelay = true
+
+# The number of prefetched messages held by receiver. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
+#qpid_receiver_capacity = 1
+
+# The qpid topology version to use.  Version 1 is what was originally used by
+# impl_qpid.  Version 2 includes some backwards-incompatible changes that allow
+# broker federation to work.  Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break. (integer value)
+# Deprecated group/name - [DEFAULT]/qpid_topology_version
+#qpid_topology_version = 1
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# The RabbitMQ broker address where a single node is used. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to back off between retries when connecting to RabbitMQ. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
+# count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
+# must wipe the RabbitMQ database. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
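Reviewer's aside: the rabbit_* defaults above assemble into a standard AMQP URL. A minimal connectivity check with kombu, the transport library underlying oslo.messaging's rabbit driver:

    from kombu import Connection

    # amqp://<rabbit_userid>:<rabbit_password>@<rabbit_host>:<rabbit_port>/<rabbit_virtual_host>
    with Connection('amqp://guest:guest@localhost:5672//') as conn:
        conn.connect()  # raises if the broker is unreachable
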
+
+[oslo_middleware]
+
+#
+# From oslo.middleware
+#
+
+# The maximum body size for each request, in bytes. (integer value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+# Deprecated group/name - [DEFAULT]/max_request_body_size
+#max_request_body_size = 114688
+
+
+[paste_deploy]
+
+#
+# From keystone
+#
+
+# Name of the paste configuration file that defines the available pipelines.
+# (string value)
+#config_file = keystone-paste.ini
+
+
+[policy]
+
+#
+# From keystone
+#
+
+# Policy backend driver. (string value)
+#driver = keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy collection.
+# (integer value)
+#list_limit = <None>
+
+
+[resource]
+
+#
+# From keystone
+#
+
+# Resource backend driver. If a resource driver is not specified, the
+# assignment driver will choose the resource driver. (string value)
+#driver = <None>
+
+# Toggle for resource caching. This has no effect unless global caching is
+# enabled. (boolean value)
+# Deprecated group/name - [assignment]/caching
+#caching = true
+
+# TTL (in seconds) to cache resource data. This has no effect unless global
+# caching is enabled. (integer value)
+# Deprecated group/name - [assignment]/cache_time
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a resource collection.
+# (integer value)
+# Deprecated group/name - [assignment]/list_limit
+#list_limit = <None>
+
+
+[revoke]
+
+#
+# From keystone
+#
+
+# An implementation of the backend for persisting revocation events. (string
+# value)
+#driver = keystone.contrib.revoke.backends.sql.Revoke
+
+# This value (in seconds) is added to the token expiration time before a
+# revocation event may be removed from the backend. (integer value)
+#expiration_buffer = 1800
+
+# Toggle for revocation event caching. This has no effect unless global caching
+# is enabled. (boolean value)
+#caching = true
+
+
+[role]
+
+#
+# From keystone
+#
+
+# Role backend driver. (string value)
+#driver = <None>
+
+# Toggle for role caching. This has no effect unless global caching is enabled.
+# (boolean value)
+#caching = true
+
+# TTL (in seconds) to cache role data. This has no effect unless global caching
+# is enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a role collection.
+# (integer value)
+#list_limit = <None>
+
+
+[saml]
+
+#
+# From keystone
+#
+
+# Default TTL, in seconds, for any generated SAML assertion created by
+# Keystone. (integer value)
+#assertion_expiration_time = 3600
+
+# Binary to be called for XML signing. Install the appropriate package, specify
+# an absolute path, or adjust your PATH environment variable if the binary
+# cannot be found. (string value)
+#xmlsec1_binary = xmlsec1
+
+# Path of the certfile for SAML signing. For non-production environments, you
+# may be interested in using `keystone-manage pki_setup` to generate self-
+# signed certificates. Note that the path cannot contain a comma. (string
+# value)
+#certfile = /etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for SAML signing. Note that the path cannot contain a
+# comma. (string value)
+#keyfile = /etc/keystone/ssl/private/signing_key.pem
+
+# Entity ID value for unique Identity Provider identification. Usually the
+# FQDN is set with a suffix. A value is required to generate IDP Metadata. For
+# example: https://keystone.example.com/v3/OS-FEDERATION/saml2/idp (string
+# value)
+#idp_entity_id = <None>
+
+# Identity Provider Single-Sign-On service value, required in the Identity
+# Provider's metadata. A value is required to generate IDP Metadata. For
+# example: https://keystone.example.com/v3/OS-FEDERATION/saml2/sso (string
+# value)
+#idp_sso_endpoint = <None>
+
+# Language used by the organization. (string value)
+#idp_lang = en
+
+# Organization name the installation belongs to. (string value)
+#idp_organization_name = <None>
+
+# Organization name to be displayed. (string value)
+#idp_organization_display_name = <None>
+
+# URL of the organization. (string value)
+#idp_organization_url = <None>
+
+# Company of contact person. (string value)
+#idp_contact_company = <None>
+
+# Given name of contact person. (string value)
+#idp_contact_name = <None>
+
+# Surname of contact person. (string value)
+#idp_contact_surname = <None>
+
+# Email address of contact person. (string value)
+#idp_contact_email = <None>
+
+# Telephone number of contact person. (string value)
+#idp_contact_telephone = <None>
+
+# Contact type. Allowed values are: technical, support, administrative,
+# billing, and other. (string value)
+#idp_contact_type = other
+
+# Path to the Identity Provider Metadata file. This file should be generated
+# with the keystone-manage saml_idp_metadata command. (string value)
+#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml
+
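Reviewer's aside: a sketch of producing the file referenced by idp_metadata_path, assuming (as the comment above suggests) that `keystone-manage saml_idp_metadata` prints the metadata document to stdout:

    import subprocess

    # Assumption: the command writes the IdP metadata XML to stdout.
    metadata = subprocess.check_output(['keystone-manage', 'saml_idp_metadata'])
    with open('/etc/keystone/saml2_idp_metadata.xml', 'wb') as f:
        f.write(metadata)
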
+
+[signing]
+
+#
+# From keystone
+#
+
+# Path of the certfile for token signing. For non-production environments, you
+# may be interested in using `keystone-manage pki_setup` to generate self-
+# signed certificates. (string value)
+#certfile = /etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile = /etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs = /etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key for token signing. (string value)
+#ca_key = /etc/keystone/ssl/private/cakey.pem
+
+# Key size (in bits) for token signing cert (auto generated certificate).
+# (integer value)
+#key_size = 2048
+
+# Days the token signing cert is valid for (auto generated certificate).
+# (integer value)
+#valid_days = 3650
+
+# Certificate subject (auto generated certificate) for token signing. (string
+# value)
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# From keystone
+#
+
+# Path of the CA key file for SSL. (string value)
+#ca_key = /etc/keystone/ssl/private/cakey.pem
+
+# SSL key length (in bits) (auto generated certificate). (integer value)
+#key_size = 1024
+
+# Days the certificate is valid for once signed (auto generated certificate).
+# (integer value)
+#valid_days = 3650
+
+# SSL certificate subject (auto generated certificate). (string value)
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+[token]
+
+#
+# From keystone
+#
+
+# External auth mechanisms that should add bind information to the token, e.g.,
+# kerberos, x509. (list value)
+#bind =
+
+# Enforcement policy on tokens presented to Keystone with bind information. One
+# of disabled, permissive, strict, required, or a specifically required bind
+# mode, e.g., kerberos or x509, to require binding to that authentication
+# method. (string value)
+#enforce_token_bind = permissive
+
+# Amount of time a token should remain valid (in seconds). (integer value)
+#expiration = 3600
+
+# Controls the token construction, validation, and revocation operations. Core
+# providers are "keystone.token.providers.[fernet|pkiz|pki|uuid].Provider". The
+# default provider is uuid. (string value)
+#provider = keystone.token.providers.uuid.Provider
+
+# Token persistence backend driver. (string value)
+#driver = keystone.token.persistence.backends.sql.Token
+
+# Toggle for token system caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache the revocation list and the revocation events if the revoke
+# extension is enabled (in seconds). This has no effect unless global and token
+# caching are enabled. (integer value)
+#revocation_cache_time = 3600
+
+# Time to cache tokens (in seconds). This has no effect unless global and token
+# caching are enabled. (integer value)
+#cache_time = <None>
+
+# Revoke token by token identifier. Setting revoke_by_id to true enables
+# various forms of enumerating tokens, e.g. `list tokens for user`. These
+# enumerations are processed to determine the list of tokens to revoke. Only
+# disable if you are switching to using the Revoke extension with a backend
+# other than KVS, which stores events in memory. (boolean value)
+#revoke_by_id = true
+
+# Allow rescoping of scoped token. Setting allow_rescope_scoped_token to false
+# prevents a user from exchanging a scoped token for any other token. (boolean
+# value)
+#allow_rescope_scoped_token = true
+
+# The hash algorithm to use for PKI tokens. This can be set to any algorithm
+# that hashlib supports. WARNING: Before changing this value, the auth_token
+# middleware must be configured with the hash_algorithms option, otherwise
+# token revocation will not be processed correctly. (string value)
+#hash_algorithm = md5
+
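Reviewer's aside: hash_algorithm exists because CMS-encoded PKI tokens are too long to index directly, so they are stored and revoked by digest. A sketch of the equivalent hashlib call for the default md5 setting:

    import hashlib

    pki_token = 'MIIC...'  # placeholder for a long CMS-encoded token
    # The digest must use the same algorithm configured in hash_algorithm,
    # or revocation lookups will miss.
    short_id = hashlib.new('md5', pki_token.encode('utf-8')).hexdigest()
    print(short_id)
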
+
+[trust]
+
+#
+# From keystone
+#
+
+# Delegation and impersonation features can be optionally disabled. (boolean
+# value)
+#enabled = true
+
+# Enable redelegation feature. (boolean value)
+#allow_redelegation = false
+
+# Maximum depth of trust redelegation. (integer value)
+#max_redelegation_count = 3
+
+# Trust backend driver. (string value)
+#driver = keystone.trust.backends.sql.Trust
+
+
+[moon]
+
+# Authorization backend driver. (string value)
+#authz_driver = keystone.contrib.moon.backends.flat.SuperExtensionConnector
+
+# Moon Log driver. (string value)
+#log_driver = keystone.contrib.moon.backends.flat.LogConnector
+
+# SuperExtension backend driver. (string value)
+#superextension_driver = keystone.contrib.moon.backends.flat.SuperExtensionConnector
+
+# IntraExtension backend driver. (string value)
+#intraextension_driver = keystone.contrib.moon.backends.sql.IntraExtensionConnector
+
+# Tenant backend driver. (string value)
+#tenant_driver = keystone.contrib.moon.backends.sql.TenantConnector
+
+# Local directory where all policies are stored. (string value)
+#policy_directory = /etc/keystone/policies
+
+# Local directory where SuperExtension configuration is stored. (string value)
+#super_extension_directory = /etc/keystone/super_extension
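Reviewer's aside: each *_driver value above is a dotted Python path. Keystone-style managers typically instantiate such drivers via oslo.utils, roughly as below (illustrative only, not moon's exact loading code):

    from oslo_utils import importutils

    driver_path = 'keystone.contrib.moon.backends.sql.IntraExtensionConnector'
    driver = importutils.import_object(driver_path)  # imports and instantiates
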
diff --git a/keystone-moon/etc/logging.conf.sample b/keystone-moon/etc/logging.conf.sample
new file mode 100644 (file)
index 0000000..6cb8c42
--- /dev/null
@@ -0,0 +1,65 @@
+[loggers]
+keys=root,access
+
+[handlers]
+keys=production,file,access_file,devel
+
+[formatters]
+keys=minimal,normal,debug
+
+
+###########
+# Loggers #
+###########
+
+[logger_root]
+level=WARNING
+handlers=file
+
+[logger_access]
+level=INFO
+qualname=access
+handlers=access_file
+
+
+################
+# Log Handlers #
+################
+
+[handler_production]
+class=handlers.SysLogHandler
+level=ERROR
+formatter=normal
+args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
+
+[handler_file]
+class=handlers.WatchedFileHandler
+level=WARNING
+formatter=normal
+args=('error.log',)
+
+[handler_access_file]
+class=handlers.WatchedFileHandler
+level=INFO
+formatter=minimal
+args=('access.log',)
+
+[handler_devel]
+class=StreamHandler
+level=NOTSET
+formatter=debug
+args=(sys.stdout,)
+
+
+##################
+# Log Formatters #
+##################
+
+[formatter_minimal]
+format=%(message)s
+
+[formatter_normal]
+format=(%(name)s): %(asctime)s %(levelname)s %(message)s
+
+[formatter_debug]
+format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
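Reviewer's aside: this file uses the stdlib fileConfig format; note the dedicated 'access' logger routed to access.log, while everything else falls through to the root logger's error.log handler. A minimal consumer:

    import logging
    import logging.config

    logging.config.fileConfig('/etc/keystone/logging.conf')
    logging.getLogger('access').info('GET /v3/projects 200')  # -> access.log
    logging.getLogger(__name__).error('boom')                 # -> error.log
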
diff --git a/keystone-moon/etc/policy.json b/keystone-moon/etc/policy.json
new file mode 100644 (file)
index 0000000..f0a081d
--- /dev/null
@@ -0,0 +1,181 @@
+{
+    "admin_required": "role:admin or is_admin:1",
+    "service_role": "role:service",
+    "service_or_admin": "rule:admin_required or rule:service_role",
+    "owner" : "user_id:%(user_id)s",
+    "admin_or_owner": "rule:admin_required or rule:owner",
+
+    "default": "rule:admin_required",
+
+    "identity:get_region": "",
+    "identity:list_regions": "",
+    "identity:create_region": "rule:admin_required",
+    "identity:update_region": "rule:admin_required",
+    "identity:delete_region": "rule:admin_required",
+
+    "identity:get_service": "rule:admin_required",
+    "identity:list_services": "rule:admin_required",
+    "identity:create_service": "rule:admin_required",
+    "identity:update_service": "rule:admin_required",
+    "identity:delete_service": "rule:admin_required",
+
+    "identity:get_endpoint": "rule:admin_required",
+    "identity:list_endpoints": "rule:admin_required",
+    "identity:create_endpoint": "rule:admin_required",
+    "identity:update_endpoint": "rule:admin_required",
+    "identity:delete_endpoint": "rule:admin_required",
+
+    "identity:get_domain": "rule:admin_required",
+    "identity:list_domains": "rule:admin_required",
+    "identity:create_domain": "rule:admin_required",
+    "identity:update_domain": "rule:admin_required",
+    "identity:delete_domain": "rule:admin_required",
+
+    "identity:get_project": "rule:admin_required",
+    "identity:list_projects": "rule:admin_required",
+    "identity:list_user_projects": "rule:admin_or_owner",
+    "identity:create_project": "rule:admin_required",
+    "identity:update_project": "rule:admin_required",
+    "identity:delete_project": "rule:admin_required",
+
+    "identity:get_user": "rule:admin_required",
+    "identity:list_users": "rule:admin_required",
+    "identity:create_user": "rule:admin_required",
+    "identity:update_user": "rule:admin_required",
+    "identity:delete_user": "rule:admin_required",
+    "identity:change_password": "rule:admin_or_owner",
+
+    "identity:get_group": "rule:admin_required",
+    "identity:list_groups": "rule:admin_required",
+    "identity:list_groups_for_user": "rule:admin_or_owner",
+    "identity:create_group": "rule:admin_required",
+    "identity:update_group": "rule:admin_required",
+    "identity:delete_group": "rule:admin_required",
+    "identity:list_users_in_group": "rule:admin_required",
+    "identity:remove_user_from_group": "rule:admin_required",
+    "identity:check_user_in_group": "rule:admin_required",
+    "identity:add_user_to_group": "rule:admin_required",
+
+    "identity:get_credential": "rule:admin_required",
+    "identity:list_credentials": "rule:admin_required",
+    "identity:create_credential": "rule:admin_required",
+    "identity:update_credential": "rule:admin_required",
+    "identity:delete_credential": "rule:admin_required",
+
+    "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
+    "identity:ec2_list_credentials": "rule:admin_or_owner",
+    "identity:ec2_create_credential": "rule:admin_or_owner",
+    "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
+
+    "identity:get_role": "rule:admin_required",
+    "identity:list_roles": "rule:admin_required",
+    "identity:create_role": "rule:admin_required",
+    "identity:update_role": "rule:admin_required",
+    "identity:delete_role": "rule:admin_required",
+
+    "identity:check_grant": "rule:admin_required",
+    "identity:list_grants": "rule:admin_required",
+    "identity:create_grant": "rule:admin_required",
+    "identity:revoke_grant": "rule:admin_required",
+
+    "identity:list_role_assignments": "rule:admin_required",
+
+    "identity:get_policy": "rule:admin_required",
+    "identity:list_policies": "rule:admin_required",
+    "identity:create_policy": "rule:admin_required",
+    "identity:update_policy": "rule:admin_required",
+    "identity:delete_policy": "rule:admin_required",
+
+    "identity:check_token": "rule:admin_required",
+    "identity:validate_token": "rule:service_or_admin",
+    "identity:validate_token_head": "rule:service_or_admin",
+    "identity:revocation_list": "rule:service_or_admin",
+    "identity:revoke_token": "rule:admin_or_owner",
+
+    "identity:create_trust": "user_id:%(trust.trustor_user_id)s",
+    "identity:get_trust": "rule:admin_or_owner",
+    "identity:list_trusts": "",
+    "identity:list_roles_for_trust": "",
+    "identity:get_role_for_trust": "",
+    "identity:delete_trust": "",
+
+    "identity:create_consumer": "rule:admin_required",
+    "identity:get_consumer": "rule:admin_required",
+    "identity:list_consumers": "rule:admin_required",
+    "identity:delete_consumer": "rule:admin_required",
+    "identity:update_consumer": "rule:admin_required",
+
+    "identity:authorize_request_token": "rule:admin_required",
+    "identity:list_access_token_roles": "rule:admin_required",
+    "identity:get_access_token_role": "rule:admin_required",
+    "identity:list_access_tokens": "rule:admin_required",
+    "identity:get_access_token": "rule:admin_required",
+    "identity:delete_access_token": "rule:admin_required",
+
+    "identity:list_projects_for_endpoint": "rule:admin_required",
+    "identity:add_endpoint_to_project": "rule:admin_required",
+    "identity:check_endpoint_in_project": "rule:admin_required",
+    "identity:list_endpoints_for_project": "rule:admin_required",
+    "identity:remove_endpoint_from_project": "rule:admin_required",
+
+    "identity:create_endpoint_group": "rule:admin_required",
+    "identity:list_endpoint_groups": "rule:admin_required",
+    "identity:get_endpoint_group": "rule:admin_required",
+    "identity:update_endpoint_group": "rule:admin_required",
+    "identity:delete_endpoint_group": "rule:admin_required",
+    "identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
+    "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
+    "identity:get_endpoint_group_in_project": "rule:admin_required",
+    "identity:add_endpoint_group_to_project": "rule:admin_required",
+    "identity:remove_endpoint_group_from_project": "rule:admin_required",
+
+    "identity:create_identity_provider": "rule:admin_required",
+    "identity:list_identity_providers": "rule:admin_required",
+    "identity:get_identity_providers": "rule:admin_required",
+    "identity:update_identity_provider": "rule:admin_required",
+    "identity:delete_identity_provider": "rule:admin_required",
+
+    "identity:create_protocol": "rule:admin_required",
+    "identity:update_protocol": "rule:admin_required",
+    "identity:get_protocol": "rule:admin_required",
+    "identity:list_protocols": "rule:admin_required",
+    "identity:delete_protocol": "rule:admin_required",
+
+    "identity:create_mapping": "rule:admin_required",
+    "identity:get_mapping": "rule:admin_required",
+    "identity:list_mappings": "rule:admin_required",
+    "identity:delete_mapping": "rule:admin_required",
+    "identity:update_mapping": "rule:admin_required",
+
+    "identity:create_service_provider": "rule:admin_required",
+    "identity:list_service_providers": "rule:admin_required",
+    "identity:get_service_provider": "rule:admin_required",
+    "identity:update_service_provider": "rule:admin_required",
+    "identity:delete_service_provider": "rule:admin_required",
+
+    "identity:get_auth_catalog": "",
+    "identity:get_auth_projects": "",
+    "identity:get_auth_domains": "",
+
+    "identity:list_projects_for_groups": "",
+    "identity:list_domains_for_groups": "",
+
+    "identity:list_revoke_events": "",
+
+    "identity:create_policy_association_for_endpoint": "rule:admin_required",
+    "identity:check_policy_association_for_endpoint": "rule:admin_required",
+    "identity:delete_policy_association_for_endpoint": "rule:admin_required",
+    "identity:create_policy_association_for_service": "rule:admin_required",
+    "identity:check_policy_association_for_service": "rule:admin_required",
+    "identity:delete_policy_association_for_service": "rule:admin_required",
+    "identity:create_policy_association_for_region_and_service": "rule:admin_required",
+    "identity:check_policy_association_for_region_and_service": "rule:admin_required",
+    "identity:delete_policy_association_for_region_and_service": "rule:admin_required",
+    "identity:get_policy_for_endpoint": "rule:admin_required",
+    "identity:list_endpoints_for_policy": "rule:admin_required",
+
+    "identity:create_domain_config": "rule:admin_required",
+    "identity:get_domain_config": "rule:admin_required",
+    "identity:update_domain_config": "rule:admin_required",
+    "identity:delete_domain_config": "rule:admin_required"
+}
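Reviewer's aside: these rules are oslo.policy expressions. A hedged sketch of evaluating one programmatically with oslo_policy's Enforcer (the file location and credential dict are illustrative):

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF, policy_file='policy.json')
    creds = {'roles': ['admin'], 'user_id': 'u1'}  # caller's token context
    target = {'user_id': 'u1'}                     # object being acted upon
    # "identity:create_region" -> rule:admin_required -> role:admin or is_admin:1
    print(enforcer.enforce('identity:create_region', target, creds))  # True
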
diff --git a/keystone-moon/etc/policy.v3cloudsample.json b/keystone-moon/etc/policy.v3cloudsample.json
new file mode 100644 (file)
index 0000000..a15b33f
--- /dev/null
@@ -0,0 +1,194 @@
+{
+    "admin_required": "role:admin",
+    "cloud_admin": "rule:admin_required and domain_id:admin_domain_id",
+    "service_role": "role:service",
+    "service_or_admin": "rule:admin_required or rule:service_role",
+    "owner" : "user_id:%(user_id)s or user_id:%(target.token.user_id)s",
+    "admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner",
+    "admin_or_cloud_admin": "rule:admin_required or rule:cloud_admin",
+    "admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s",
+
+    "default": "rule:admin_required",
+
+    "identity:get_region": "",
+    "identity:list_regions": "",
+    "identity:create_region": "rule:cloud_admin",
+    "identity:update_region": "rule:cloud_admin",
+    "identity:delete_region": "rule:cloud_admin",
+
+    "identity:get_service": "rule:admin_or_cloud_admin",
+    "identity:list_services": "rule:admin_or_cloud_admin",
+    "identity:create_service": "rule:cloud_admin",
+    "identity:update_service": "rule:cloud_admin",
+    "identity:delete_service": "rule:cloud_admin",
+
+    "identity:get_endpoint": "rule:admin_or_cloud_admin",
+    "identity:list_endpoints": "rule:admin_or_cloud_admin",
+    "identity:create_endpoint": "rule:cloud_admin",
+    "identity:update_endpoint": "rule:cloud_admin",
+    "identity:delete_endpoint": "rule:cloud_admin",
+
+    "identity:get_domain": "rule:cloud_admin",
+    "identity:list_domains": "rule:cloud_admin",
+    "identity:create_domain": "rule:cloud_admin",
+    "identity:update_domain": "rule:cloud_admin",
+    "identity:delete_domain": "rule:cloud_admin",
+
+    "admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s",
+    "admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s",
+    "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
+    "identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id",
+    "identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id",
+    "identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id",
+    "identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
+    "identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
+
+    "admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s",
+    "admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s",
+    "identity:get_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
+    "identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id",
+    "identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id",
+    "identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
+    "identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
+
+    "admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s",
+    "admin_and_matching_group_domain_id": "rule:admin_required and domain_id:%(group.domain_id)s",
+    "identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id",
+    "identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_domain_id",
+    "identity:create_group": "rule:cloud_admin or rule:admin_and_matching_group_domain_id",
+    "identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:remove_user_from_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+    "identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
+
+    "identity:get_credential": "rule:admin_required",
+    "identity:list_credentials": "rule:admin_required or user_id:%(user_id)s",
+    "identity:create_credential": "rule:admin_required",
+    "identity:update_credential": "rule:admin_required",
+    "identity:delete_credential": "rule:admin_required",
+
+    "identity:ec2_get_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)",
+    "identity:ec2_list_credentials": "rule:admin_or_cloud_admin or rule:owner",
+    "identity:ec2_create_credential": "rule:admin_or_cloud_admin or rule:owner",
+    "identity:ec2_delete_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)",
+
+    "identity:get_role": "rule:admin_or_cloud_admin",
+    "identity:list_roles": "rule:admin_or_cloud_admin",
+    "identity:create_role": "rule:cloud_admin",
+    "identity:update_role": "rule:cloud_admin",
+    "identity:delete_role": "rule:cloud_admin",
+
+    "domain_admin_for_grants": "rule:admin_required and (domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s)",
+    "project_admin_for_grants": "rule:admin_required and project_id:%(project_id)s",
+    "identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
+    "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
+    "identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
+    "identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
+
+    "admin_on_domain_filter" : "rule:cloud_admin or (rule:admin_required and domain_id:%(scope.domain.id)s)",
+    "admin_on_project_filter" : "rule:cloud_admin or (rule:admin_required and project_id:%(scope.project.id)s)",
+    "identity:list_role_assignments": "rule:admin_on_domain_filter or rule:admin_on_project_filter",
+
+    "identity:get_policy": "rule:cloud_admin",
+    "identity:list_policies": "rule:cloud_admin",
+    "identity:create_policy": "rule:cloud_admin",
+    "identity:update_policy": "rule:cloud_admin",
+    "identity:delete_policy": "rule:cloud_admin",
+
+    "identity:change_password": "rule:owner",
+    "identity:check_token": "rule:admin_or_owner",
+    "identity:validate_token": "rule:service_or_admin",
+    "identity:validate_token_head": "rule:service_or_admin",
+    "identity:revocation_list": "rule:service_or_admin",
+    "identity:revoke_token": "rule:admin_or_owner",
+
+    "identity:create_trust": "user_id:%(trust.trustor_user_id)s",
+    "identity:get_trust": "rule:admin_or_owner",
+    "identity:list_trusts": "",
+    "identity:list_roles_for_trust": "",
+    "identity:get_role_for_trust": "",
+    "identity:delete_trust": "",
+
+    "identity:create_consumer": "rule:admin_required",
+    "identity:get_consumer": "rule:admin_required",
+    "identity:list_consumers": "rule:admin_required",
+    "identity:delete_consumer": "rule:admin_required",
+    "identity:update_consumer": "rule:admin_required",
+
+    "identity:authorize_request_token": "rule:admin_required",
+    "identity:list_access_token_roles": "rule:admin_required",
+    "identity:get_access_token_role": "rule:admin_required",
+    "identity:list_access_tokens": "rule:admin_required",
+    "identity:get_access_token": "rule:admin_required",
+    "identity:delete_access_token": "rule:admin_required",
+
+    "identity:list_projects_for_endpoint": "rule:admin_required",
+    "identity:add_endpoint_to_project": "rule:admin_required",
+    "identity:check_endpoint_in_project": "rule:admin_required",
+    "identity:list_endpoints_for_project": "rule:admin_required",
+    "identity:remove_endpoint_from_project": "rule:admin_required",
+
+    "identity:create_endpoint_group": "rule:admin_required",
+    "identity:list_endpoint_groups": "rule:admin_required",
+    "identity:get_endpoint_group": "rule:admin_required",
+    "identity:update_endpoint_group": "rule:admin_required",
+    "identity:delete_endpoint_group": "rule:admin_required",
+    "identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
+    "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
+    "identity:get_endpoint_group_in_project": "rule:admin_required",
+    "identity:add_endpoint_group_to_project": "rule:admin_required",
+    "identity:remove_endpoint_group_from_project": "rule:admin_required",
+
+    "identity:create_identity_provider": "rule:cloud_admin",
+    "identity:list_identity_providers": "rule:cloud_admin",
+    "identity:get_identity_providers": "rule:cloud_admin",
+    "identity:update_identity_provider": "rule:cloud_admin",
+    "identity:delete_identity_provider": "rule:cloud_admin",
+
+    "identity:create_protocol": "rule:cloud_admin",
+    "identity:update_protocol": "rule:cloud_admin",
+    "identity:get_protocol": "rule:cloud_admin",
+    "identity:list_protocols": "rule:cloud_admin",
+    "identity:delete_protocol": "rule:cloud_admin",
+
+    "identity:create_mapping": "rule:cloud_admin",
+    "identity:get_mapping": "rule:cloud_admin",
+    "identity:list_mappings": "rule:cloud_admin",
+    "identity:delete_mapping": "rule:cloud_admin",
+    "identity:update_mapping": "rule:cloud_admin",
+
+    "identity:create_service_provider": "rule:cloud_admin",
+    "identity:list_service_providers": "rule:cloud_admin",
+    "identity:get_service_provider": "rule:cloud_admin",
+    "identity:update_service_provider": "rule:cloud_admin",
+    "identity:delete_service_provider": "rule:cloud_admin",
+
+    "identity:get_auth_catalog": "",
+    "identity:get_auth_projects": "",
+    "identity:get_auth_domains": "",
+
+    "identity:list_projects_for_groups": "",
+    "identity:list_domains_for_groups": "",
+
+    "identity:list_revoke_events": "",
+
+    "identity:create_policy_association_for_endpoint": "rule:cloud_admin",
+    "identity:check_policy_association_for_endpoint": "rule:cloud_admin",
+    "identity:delete_policy_association_for_endpoint": "rule:cloud_admin",
+    "identity:create_policy_association_for_service": "rule:cloud_admin",
+    "identity:check_policy_association_for_service": "rule:cloud_admin",
+    "identity:delete_policy_association_for_service": "rule:cloud_admin",
+    "identity:create_policy_association_for_region_and_service": "rule:cloud_admin",
+    "identity:check_policy_association_for_region_and_service": "rule:cloud_admin",
+    "identity:delete_policy_association_for_region_and_service": "rule:cloud_admin",
+    "identity:get_policy_for_endpoint": "rule:cloud_admin",
+    "identity:list_endpoints_for_policy": "rule:cloud_admin",
+
+    "identity:create_domain_config": "rule:cloud_admin",
+    "identity:get_domain_config": "rule:cloud_admin",
+    "identity:update_domain_config": "rule:cloud_admin",
+    "identity:delete_domain_config": "rule:cloud_admin"
+}
diff --git a/keystone-moon/etc/sso_callback_template.html b/keystone-moon/etc/sso_callback_template.html
new file mode 100644 (file)
index 0000000..c6997dc
--- /dev/null
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html xmlns="http://www.w3.org/1999/xhtml">
+  <head>
+    <title>Keystone WebSSO redirect</title>
+  </head>
+  <body>
+     <form id="sso" name="sso" action="$host" method="post">
+       Please wait...
+       <br/>
+       <input type="hidden" name="token" id="token" value="$token"/>
+       <noscript>
+         <input type="submit" name="submit_no_javascript" id="submit_no_javascript"
+            value="If your JavaScript is disabled, please click to continue"/>
+       </noscript>
+     </form>
+     <script type="text/javascript">
+       window.onload = function() {
+         document.forms['sso'].submit();
+       }
+     </script>
+  </body>
+</html>
\ No newline at end of file
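Reviewer's aside: the $host and $token placeholders follow Python's string.Template syntax, which is how keystone-style code would render this page before returning it to the browser. A sketch with placeholder values:

    from string import Template

    with open('sso_callback_template.html') as f:
        page = Template(f.read())
    html = page.substitute(host='https://horizon.example.com/auth/websso/',
                           token='gAAAA...')  # placeholder token value
    print(html)
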
diff --git a/keystone-moon/examples/moon/__init__.py b/keystone-moon/examples/moon/__init__.py
new file mode 100644 (file)
index 0000000..1b678d5
--- /dev/null
@@ -0,0 +1,4 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
diff --git a/keystone-moon/examples/moon/policies/mls_conf/authz/assignment.json b/keystone-moon/examples/moon/policies/mls_conf/authz/assignment.json
new file mode 100644 (file)
index 0000000..c917638
--- /dev/null
@@ -0,0 +1,25 @@
+{
+    "subject_assignments": {
+        "subject_security_level":{
+            "user1": ["low"],
+            "user2": ["medium"],
+            "user3": ["high"]
+        }
+    },
+
+    "action_assignments": {
+        "computing_action":{
+          "pause": ["vm_admin"],
+          "unpause": ["vm_admin"],
+          "start": ["vm_admin"],
+          "stop": ["vm_admin"]
+        }
+    },
+
+    "object_assignments": {
+        "object_security_level": {
+            "vm1": ["low"],
+            "vm2": ["medium"]
+        }
+    }
+}
\ No newline at end of file
diff --git a/keystone-moon/examples/moon/policies/mls_conf/authz/metadata.json b/keystone-moon/examples/moon/policies/mls_conf/authz/metadata.json
new file mode 100644 (file)
index 0000000..0c21f17
--- /dev/null
@@ -0,0 +1,18 @@
+{
+    "name": "MLS_metadata",
+    "model": "MLS",
+    "genre": "authz",
+    "description": "",
+
+    "subject_categories": [
+        "subject_security_level"
+    ],
+
+    "action_categories": [
+        "computing_action"
+    ],
+
+    "object_categories": [
+        "object_security_level"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/mls_conf/authz/metarule.json b/keystone-moon/examples/moon/policies/mls_conf/authz/metarule.json
new file mode 100644 (file)
index 0000000..0f71745
--- /dev/null
@@ -0,0 +1,12 @@
+{
+  "sub_meta_rules": {
+    "relation_super": {
+      "subject_categories": ["subject_security_level"],
+      "action_categories": ["computing_action"],
+      "object_categories": ["object_security_level"],
+      "relation": "relation_super"
+    }
+  },
+  "aggregation": "and_true_aggregation"
+}
+
diff --git a/keystone-moon/examples/moon/policies/mls_conf/authz/rules.json b/keystone-moon/examples/moon/policies/mls_conf/authz/rules.json
new file mode 100644 (file)
index 0000000..7badb6f
--- /dev/null
@@ -0,0 +1,13 @@
+{
+  "relation_super":[
+    ["high", "vm_admin", "medium"],
+    ["high", "vm_admin", "low"],
+    ["medium", "vm_admin", "low"],
+    ["high", "vm_access", "high"],
+    ["high", "vm_access", "medium"],
+    ["high", "vm_access", "low"],
+    ["medium", "vm_access", "medium"],
+    ["medium", "vm_access", "low"],
+    ["low", "vm_access", "low"]
+  ]
+}
\ No newline at end of file
diff --git a/keystone-moon/examples/moon/policies/mls_conf/authz/scope.json b/keystone-moon/examples/moon/policies/mls_conf/authz/scope.json
new file mode 100644 (file)
index 0000000..f07b007
--- /dev/null
@@ -0,0 +1,24 @@
+{
+  "subject_category_scope": {
+    "subject_security_level": [
+      "high",
+      "medium",
+      "low"
+    ]
+  },
+
+  "action_category_scope": {
+    "computing_action": [
+      "vm_admin",
+      "vm_access"
+    ]
+  },
+
+  "object_category_scope": {
+    "object_security_level": [
+      "high",
+      "medium",
+      "low"
+      ]
+    }
+}
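Reviewer's aside: taken together, these four mls_conf files define a decision function: a request is allowed when some (subject level, action value, object level) triple derived from assignment.json appears in rules.json's relation_super. A simplified sketch of that evaluation (real moon resolves the categories through metarule.json):

    import json

    with open('assignment.json') as f:
        assign = json.load(f)
    with open('rules.json') as f:
        rules = json.load(f)

    def is_authorized(user, action, vm):
        subj = assign['subject_assignments']['subject_security_level'][user]
        act = assign['action_assignments']['computing_action'][action]
        obj = assign['object_assignments']['object_security_level'][vm]
        # Allowed iff some combination of assigned values matches a rule.
        return any([s, a, o] in rules['relation_super']
                   for s in subj for a in act for o in obj)

    print(is_authorized('user3', 'start', 'vm2'))  # high/vm_admin/medium -> True
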
diff --git a/keystone-moon/examples/moon/policies/policy_mls_admin/assignment.json b/keystone-moon/examples/moon/policies/policy_mls_admin/assignment.json
new file mode 100644 (file)
index 0000000..e1c208d
--- /dev/null
@@ -0,0 +1,37 @@
+{
+    "subject_assignments": {
+        "role":{
+            "admin": ["admin" ]
+        }
+    },
+
+    "action_assignments": {
+        "ie_action":{
+            "read": ["ie_admin", "ie_access"],
+            "write": ["ie_admin"],
+            "create": ["ie_admin"],
+            "delete": ["ie_admin"]
+        }
+    },
+
+    "object_assignments": {
+        "id": {
+            "subjects": ["subjects"],
+            "objects": ["objects"],
+            "actions": ["actions"],
+            "subject_categories": ["subject_categories"],
+            "object_categories": ["object_categories"],
+            "action_categories": ["action_categories"],
+            "subject_category_scope": ["subject_category_scope"],
+            "object_category_scope": ["object_category_scope"],
+            "action_category_scope": ["action_category_scope"],
+            "sub_rules": ["sub_rules"],
+            "sub_meta_rule": ["sub_meta_rule"],
+            "subject_assignments": ["subject_assignments"],
+            "object_assignments": ["object_assignments"],
+            "action_assignments": ["action_assignments"],
+            "sub_meta_rule_relations": ["sub_meta_rule_relations"],
+            "aggregation_algorithms": ["aggregation_algorithms"]
+        }
+    }
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_admin/metadata.json b/keystone-moon/examples/moon/policies/policy_mls_admin/metadata.json
new file mode 100644 (file)
index 0000000..f65cb27
--- /dev/null
@@ -0,0 +1,18 @@
+{
+    "name": "RBAC_metadata",
+    "model": "RBAC",
+    "genre": "authz",
+    "description": "Role Based access Control authorization policy",
+
+    "subject_categories": [
+        "role"
+    ],
+
+    "action_categories": [
+        "ie_action"
+    ],
+
+    "object_categories": [
+        "id"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_admin/metarule.json b/keystone-moon/examples/moon/policies/policy_mls_admin/metarule.json
new file mode 100644 (file)
index 0000000..3a2c7b7
--- /dev/null
@@ -0,0 +1,12 @@
+{
+  "sub_meta_rules": {
+    "relation_super": {
+      "subject_categories": ["role"],
+      "action_categories": ["ie_action"],
+      "object_categories": ["id"],
+      "relation": "relation_super"
+    }
+  },
+  "aggregation": "and_true_aggregation"
+}
+
diff --git a/keystone-moon/examples/moon/policies/policy_mls_admin/perimeter.json b/keystone-moon/examples/moon/policies/policy_mls_admin/perimeter.json
new file mode 100644 (file)
index 0000000..e570aae
--- /dev/null
@@ -0,0 +1,29 @@
+{
+    "subjects": [
+        "admin"
+    ],
+    "actions": [
+        "read",
+        "write",
+        "create",
+        "delete"
+    ],
+    "objects": [
+        "subjects",
+        "objects",
+        "actions",
+        "subject_categories",
+        "object_categories",
+        "action_categories",
+        "subject_category_scope",
+        "object_category_scope",
+        "action_category_scope",
+        "sub_rules",
+        "subject_assignments",
+        "object_assignments",
+        "action_assignments",
+        "sub_meta_rule_relations",
+        "aggregation_algorithms",
+        "sub_meta_rule"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_admin/rules.json b/keystone-moon/examples/moon/policies/policy_mls_admin/rules.json
new file mode 100644 (file)
index 0000000..e17ba8f
--- /dev/null
@@ -0,0 +1,20 @@
+{
+  "relation_super":[
+    ["admin", "ie_admin", "subjects"],
+    ["admin", "ie_admin", "objects"],
+    ["admin", "ie_admin", "actions"],
+    ["admin", "ie_admin", "subject_categories"],
+    ["admin", "ie_admin", "object_categories"],
+    ["admin", "ie_admin", "action_categories"],
+    ["admin", "ie_admin", "subject_category_scope"],
+    ["admin", "ie_admin", "object_category_scope"],
+    ["admin", "ie_admin", "action_category_scope"],
+    ["admin", "ie_admin", "sub_rules"],
+    ["admin", "ie_admin", "sub_meta_rule"],
+    ["admin", "ie_admin", "subject_assignments"],
+    ["admin", "ie_admin", "object_assignments"],
+    ["admin", "ie_admin", "action_assignments"],
+    ["admin", "ie_admin", "sub_meta_rule_relations"],
+    ["admin", "ie_admin", "aggregation_algorithms"]
+  ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_admin/scope.json b/keystone-moon/examples/moon/policies/policy_mls_admin/scope.json
new file mode 100644 (file)
index 0000000..faf06d2
--- /dev/null
@@ -0,0 +1,35 @@
+{
+  "subject_category_scope": {
+    "role": [
+      "admin"
+    ]
+  },
+
+  "action_category_scope": {
+    "ie_action": [
+      "ie_access",
+      "ie_admin"
+    ]
+  },
+
+  "object_category_scope": {
+    "id": [
+        "subjects",
+        "objects",
+        "actions",
+        "subject_categories",
+        "object_categories",
+        "action_categories",
+        "subject_category_scope",
+        "object_category_scope",
+        "action_category_scope",
+        "sub_rules",
+        "sub_meta_rule",
+        "subject_assignments",
+        "object_assignments",
+        "action_assignments",
+        "sub_meta_rule_relations",
+        "aggregation_algorithms"
+      ]
+    }
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_authz/assignment.json b/keystone-moon/examples/moon/policies/policy_mls_authz/assignment.json
new file mode 100644 (file)
index 0000000..e2a244b
--- /dev/null
@@ -0,0 +1,23 @@
+{
+    "subject_assignments": {
+        "subject_security_level":{
+        }
+    },
+
+    "action_assignments": {
+        "computing_action":{
+          "pause": ["vm_admin"],
+          "unpause": ["vm_admin"],
+          "start": ["vm_admin"],
+          "stop": ["vm_admin"],
+          "list": ["vm_access", "vm_admin"],
+          "create": ["vm_admin"]
+        }
+    },
+
+    "object_assignments": {
+        "object_security_level": {
+            "servers": ["low"]
+        }
+    }
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_authz/metadata.json b/keystone-moon/examples/moon/policies/policy_mls_authz/metadata.json
new file mode 100644 (file)
index 0000000..56dc57d
--- /dev/null
@@ -0,0 +1,19 @@
+{
+    "name": "MLS_metadata",
+    "model": "MLS",
+    "genre": "authz",
+    "description": "Multi Layer Security authorization policy",
+
+    "subject_categories": [
+        "subject_security_level"
+    ],
+
+    "action_categories": [
+        "computing_action",
+        "storage_action"
+    ],
+
+    "object_categories": [
+        "object_security_level"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_authz/metarule.json b/keystone-moon/examples/moon/policies/policy_mls_authz/metarule.json
new file mode 100644 (file)
index 0000000..0f71745
--- /dev/null
@@ -0,0 +1,12 @@
+{
+  "sub_meta_rules": {
+    "relation_super": {
+      "subject_categories": ["subject_security_level"],
+      "action_categories": ["computing_action"],
+      "object_categories": ["object_security_level"],
+      "relation": "relation_super"
+    }
+  },
+  "aggregation": "and_true_aggregation"
+}
+
diff --git a/keystone-moon/examples/moon/policies/policy_mls_authz/perimeter.json b/keystone-moon/examples/moon/policies/policy_mls_authz/perimeter.json
new file mode 100644 (file)
index 0000000..4bf88de
--- /dev/null
@@ -0,0 +1,16 @@
+{
+    "subjects": [
+        "admin"
+    ],
+    "actions": [
+        "pause",
+        "unpause",
+        "start",
+        "stop",
+        "create",
+        "list"
+    ],
+    "objects": [
+        "servers"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_authz/rules.json b/keystone-moon/examples/moon/policies/policy_mls_authz/rules.json
new file mode 100644 (file)
index 0000000..f018a6f
--- /dev/null
@@ -0,0 +1,13 @@
+{
+  "relation_super":[
+    ["high", "vm_admin", "medium"],
+    ["high", "vm_admin", "low"],
+    ["medium", "vm_admin", "low"],
+    ["high", "vm_access", "high"],
+    ["high", "vm_access", "medium"],
+    ["high", "vm_access", "low"],
+    ["medium", "vm_access", "medium"],
+    ["medium", "vm_access", "low"],
+    ["low", "vm_access", "low"]
+  ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_mls_authz/scope.json b/keystone-moon/examples/moon/policies/policy_mls_authz/scope.json
new file mode 100644 (file)
index 0000000..d3146ac
--- /dev/null
@@ -0,0 +1,24 @@
+{
+  "subject_category_scope": {
+    "subject_security_level": [
+      "high",
+      "medium",
+      "low"
+    ]
+  },
+
+  "action_category_scope": {
+    "computing_action": [
+      "vm_access",
+      "vm_admin"
+    ]
+  },
+
+  "object_category_scope": {
+    "object_security_level": [
+      "high",
+      "medium",
+      "low"
+      ]
+    }
+}
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_admin/assignment.json b/keystone-moon/examples/moon/policies/policy_rbac_admin/assignment.json
new file mode 100644 (file)
index 0000000..e1c208d
--- /dev/null
@@ -0,0 +1,37 @@
+{
+    "subject_assignments": {
+        "role":{
+            "admin": ["admin" ]
+        }
+    },
+
+    "action_assignments": {
+        "ie_action":{
+            "read": ["ie_admin", "ie_access"],
+            "write": ["ie_admin"],
+            "create": ["ie_admin"],
+            "delete": ["ie_admin"]
+        }
+    },
+
+    "object_assignments": {
+        "id": {
+            "subjects": ["subjects"],
+            "objects": ["objects"],
+            "actions": ["actions"],
+            "subject_categories": ["subject_categories"],
+            "object_categories": ["object_categories"],
+            "action_categories": ["action_categories"],
+            "subject_category_scope": ["subject_category_scope"],
+            "object_category_scope": ["object_category_scope"],
+            "action_category_scope": ["action_category_scope"],
+            "sub_rules": ["sub_rules"],
+            "sub_meta_rule": ["sub_meta_rule"],
+            "subject_assignments": ["subject_assignments"],
+            "object_assignments": ["object_assignments"],
+            "action_assignments": ["action_assignments"],
+            "sub_meta_rule_relations": ["sub_meta_rule_relations"],
+            "aggregation_algorithms": ["aggregation_algorithms"]
+        }
+    }
+}
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_admin/metadata.json b/keystone-moon/examples/moon/policies/policy_rbac_admin/metadata.json
new file mode 100644 (file)
index 0000000..f65cb27
--- /dev/null
@@ -0,0 +1,18 @@
+{
+    "name": "RBAC_metadata",
+    "model": "RBAC",
+    "genre": "authz",
+    "description": "Role Based access Control authorization policy",
+
+    "subject_categories": [
+        "role"
+    ],
+
+    "action_categories": [
+        "ie_action"
+    ],
+
+    "object_categories": [
+        "id"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_admin/metarule.json b/keystone-moon/examples/moon/policies/policy_rbac_admin/metarule.json
new file mode 100644 (file)
index 0000000..3a2c7b7
--- /dev/null
@@ -0,0 +1,12 @@
+{
+  "sub_meta_rules": {
+    "relation_super": {
+      "subject_categories": ["role"],
+      "action_categories": ["ie_action"],
+      "object_categories": ["id"],
+      "relation": "relation_super"
+    }
+  },
+  "aggregation": "and_true_aggregation"
+}
+
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_admin/perimeter.json b/keystone-moon/examples/moon/policies/policy_rbac_admin/perimeter.json
new file mode 100644 (file)
index 0000000..e570aae
--- /dev/null
@@ -0,0 +1,29 @@
+{
+    "subjects": [
+        "admin"
+    ],
+    "actions": [
+        "read",
+        "write",
+        "create",
+        "delete"
+    ],
+    "objects": [
+        "subjects",
+        "objects",
+        "actions",
+        "subject_categories",
+        "object_categories",
+        "action_categories",
+        "subject_category_scope",
+        "object_category_scope",
+        "action_category_scope",
+        "sub_rules",
+        "subject_assignments",
+        "object_assignments",
+        "action_assignments",
+        "sub_meta_rule_relations",
+        "aggregation_algorithms",
+        "sub_meta_rule"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_admin/rules.json b/keystone-moon/examples/moon/policies/policy_rbac_admin/rules.json
new file mode 100644 (file)
index 0000000..e17ba8f
--- /dev/null
@@ -0,0 +1,20 @@
+{
+  "relation_super":[
+    ["admin", "ie_admin", "subjects"],
+    ["admin", "ie_admin", "objects"],
+    ["admin", "ie_admin", "actions"],
+    ["admin", "ie_admin", "subject_categories"],
+    ["admin", "ie_admin", "object_categories"],
+    ["admin", "ie_admin", "action_categories"],
+    ["admin", "ie_admin", "subject_category_scope"],
+    ["admin", "ie_admin", "object_category_scope"],
+    ["admin", "ie_admin", "action_category_scope"],
+    ["admin", "ie_admin", "sub_rules"],
+    ["admin", "ie_admin", "sub_meta_rule"],
+    ["admin", "ie_admin", "subject_assignments"],
+    ["admin", "ie_admin", "object_assignments"],
+    ["admin", "ie_admin", "action_assignments"],
+    ["admin", "ie_admin", "sub_meta_rule_relations"],
+    ["admin", "ie_admin", "aggregation_algorithms"]
+  ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_admin/scope.json b/keystone-moon/examples/moon/policies/policy_rbac_admin/scope.json
new file mode 100644 (file)
index 0000000..faf06d2
--- /dev/null
@@ -0,0 +1,35 @@
+{
+  "subject_category_scope": {
+    "role": [
+      "admin"
+    ]
+  },
+
+  "action_category_scope": {
+    "ie_action": [
+      "ie_access",
+      "ie_admin"
+    ]
+  },
+
+  "object_category_scope": {
+    "id": [
+        "subjects",
+        "objects",
+        "actions",
+        "subject_categories",
+        "object_categories",
+        "action_categories",
+        "subject_category_scope",
+        "object_category_scope",
+        "action_category_scope",
+        "sub_rules",
+        "sub_meta_rule",
+        "subject_assignments",
+        "object_assignments",
+        "action_assignments",
+        "sub_meta_rule_relations",
+        "aggregation_algorithms"
+      ]
+    }
+}
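
scope.json enumerates the values each category may take, so every value used
in rules.json should also appear here. A small consistency check, assuming the
file layout above (category order is taken from the sub-meta-rule):

    def check_rules_in_scope(metarule, scope, rules):
        smr = metarule['sub_meta_rules']['relation_super']
        categories = (smr['subject_categories'] + smr['action_categories'] +
                      smr['object_categories'])
        scopes = dict(scope['subject_category_scope'])
        scopes.update(scope['action_category_scope'])
        scopes.update(scope['object_category_scope'])
        for rule in rules['relation_super']:
            # Each rule position corresponds to one category.
            for category, value in zip(categories, rule):
                assert value in scopes[category], (category, value)
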
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_authz/assignment.json b/keystone-moon/examples/moon/policies/policy_rbac_authz/assignment.json
new file mode 100644 (file)
index 0000000..e804b56
--- /dev/null
@@ -0,0 +1,28 @@
+{
+    "subject_assignments": {
+        "role":{
+          "admin": ["admin" ]
+        }
+    },
+
+    "action_assignments": {
+        "computing_action":{
+          "pause": ["vm_admin"],
+          "unpause": ["vm_admin"],
+          "start": ["vm_admin"],
+          "stop": ["vm_admin"],
+          "list": ["vm_access", "vm_admin"],
+          "create": ["vm_admin"]
+        },
+        "storage_action":{
+          "get": ["vm_access"],
+          "set": ["vm_access", "vm_admin"]
+        }
+    },
+
+    "object_assignments": {
+        "id": {
+            "servers": ["servers"]
+        }
+    }
+}
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_authz/metadata.json b/keystone-moon/examples/moon/policies/policy_rbac_authz/metadata.json
new file mode 100644 (file)
index 0000000..7f34ed7
--- /dev/null
@@ -0,0 +1,19 @@
+{
+    "name": "MLS_metadata",
+    "model": "MLS",
+    "genre": "authz",
+    "description": "Multi Layer Security authorization policy",
+
+    "subject_categories": [
+        "role"
+    ],
+
+    "action_categories": [
+        "computing_action",
+        "storage_action"
+    ],
+
+    "object_categories": [
+        "id"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_authz/metarule.json b/keystone-moon/examples/moon/policies/policy_rbac_authz/metarule.json
new file mode 100644 (file)
index 0000000..ce82833
--- /dev/null
@@ -0,0 +1,12 @@
+{
+  "sub_meta_rules": {
+    "relation_super": {
+      "subject_categories": ["role"],
+      "action_categories": ["computing_action", "storage_action"],
+      "object_categories": ["id"],
+      "relation": "relation_super"
+    }
+  },
+  "aggregation": "and_true_aggregation"
+}
+
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_authz/perimeter.json b/keystone-moon/examples/moon/policies/policy_rbac_authz/perimeter.json
new file mode 100644 (file)
index 0000000..4bf88de
--- /dev/null
@@ -0,0 +1,16 @@
+{
+    "subjects": [
+        "admin"
+    ],
+    "actions": [
+        "pause",
+        "unpause",
+        "start",
+        "stop",
+        "create",
+        "list"
+    ],
+    "objects": [
+        "servers"
+    ]
+}
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_authz/rules.json b/keystone-moon/examples/moon/policies/policy_rbac_authz/rules.json
new file mode 100644 (file)
index 0000000..7f9dc3b
--- /dev/null
@@ -0,0 +1,6 @@
+{
+  "relation_super":[
+    ["admin", "vm_admin", "vm_admin", "servers"],
+    ["admin", "vm_access", "vm_access", "servers"]
+  ]
+}
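
In the authz policy the meta-rule spans two action categories, so each rule is
a 4-tuple: (role value, computing_action value, storage_action value, id
value). assignment.json above maps concrete perimeter names onto those values
(e.g. action "start" carries computing_action "vm_admin"); once a request is
categorized, the check is again a plain membership test (sketch, names
illustrative):

    def authz(rules, role, computing, storage, object_id):
        # Tuple order follows metarule.json:
        # role, computing_action, storage_action, id.
        return [role, computing, storage, object_id] in rules['relation_super']

    # authz(rules, 'admin', 'vm_admin', 'vm_admin', 'servers') -> True
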
diff --git a/keystone-moon/examples/moon/policies/policy_rbac_authz/scope.json b/keystone-moon/examples/moon/policies/policy_rbac_authz/scope.json
new file mode 100644 (file)
index 0000000..34c5350
--- /dev/null
@@ -0,0 +1,24 @@
+{
+  "subject_category_scope": {
+    "role": [
+      "admin"
+    ]
+  },
+
+  "action_category_scope": {
+    "computing_action": [
+      "vm_access",
+      "vm_admin"
+    ],
+    "storage_action": [
+      "vm_access",
+      "vm_admin"
+    ]
+  },
+
+  "object_category_scope": {
+    "id": [
+      "servers"
+      ]
+    }
+}
diff --git a/keystone-moon/examples/moon/super_extension/policy/assignment.json b/keystone-moon/examples/moon/super_extension/policy/assignment.json
new file mode 100644 (file)
index 0000000..352d392
--- /dev/null
@@ -0,0 +1,26 @@
+{
+    "subject_category_assignments": {
+        "role":{
+            "admin": [
+                "super_user",
+                "super_admin",
+                "super_root",
+                "inter_extension_user",
+                "inter_extension_admin",
+                "inter_extension_root"
+            ]
+        }
+    },
+    "object_category_assignments": {
+        "action": {
+            "intra_extension": [],
+            "mapping": [],
+            "inter_extension": []
+        },
+        "object_id": {
+            "intra_extension": ["intra_extension"],
+            "mapping": ["mapping"],
+            "inter_extension": ["inter_extension"]
+        }
+    }
+}
diff --git a/keystone-moon/examples/moon/super_extension/policy/configuration.json b/keystone-moon/examples/moon/super_extension/policy/configuration.json
new file mode 100644 (file)
index 0000000..18918e7
--- /dev/null
@@ -0,0 +1,43 @@
+{
+    "subject_category_values": {
+        "role": [
+            "super_user",
+            "super_admin",
+            "super_root",
+            "inter_extension_user",
+            "inter_extension_admin",
+            "inter_extension_root"
+        ]
+    },
+
+    "object_category_values": {
+        "action": [
+            "list",
+            "create",
+            "destroy",
+            "delegate"
+        ],
+        "object_id": [
+            "intra_extension",
+            "mapping",
+            "inter_extension"
+        ]
+    },
+
+    "rules":{
+        "permission": [
+            ["super_user", "intra_extension", "list"],
+            ["super_admin", "intra_extension", "create"],
+            ["super_admin", "intra_extension", "destroy"],
+            ["super_root", "intra_extension", "delegate"],
+            ["super_user", "mapping", "list"],
+            ["super_admin", "mapping", "create"],
+            ["super_admin", "mapping", "destroy"],
+            ["super_root", "mapping", "delegate"],
+            ["inter_extension_user", "inter_extension", "list"],
+            ["inter_extension_admin", "inter_extension", "create"],
+            ["inter_extension_admin", "inter_extension", "destroy"],
+            ["inter_extension_root", "inter_extension", "delegate"]
+        ]
+    }
+}
\ No newline at end of file
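
The super extension folds values, assignments and rules into a single
configuration.json; each "permission" entry is a (role value, object_id value,
action value) triple, following the object_categories order declared in
metadata.json below. A sketch of the corresponding check (illustrative only):

    def permitted(configuration, role, object_id, action):
        # e.g. permitted(cfg, 'super_admin', 'mapping', 'create') -> True
        return [role, object_id, action] in configuration['rules']['permission']
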
diff --git a/keystone-moon/examples/moon/super_extension/policy/metadata.json b/keystone-moon/examples/moon/super_extension/policy/metadata.json
new file mode 100644 (file)
index 0000000..316bfcb
--- /dev/null
@@ -0,0 +1,26 @@
+{
+    "name": "RBAC_metadata",
+    "model": "RBAC",
+    "genre": "super",
+    "description": "",
+
+    "subject_categories": [
+        "role"
+    ],
+
+    "object_categories": [
+        "object_id",
+        "action"
+    ],
+
+    "meta_rule": {
+        "sub_meta_rules": {
+            "permission": {
+                    "subject_categories": ["role"],
+                    "object_categories": ["object_id", "action"],
+                    "relation": "permission"
+                }
+            },
+        "aggregation": "and_true_aggregation"
+    }
+}
diff --git a/keystone-moon/examples/moon/super_extension/policy/perimeter.json b/keystone-moon/examples/moon/super_extension/policy/perimeter.json
new file mode 100644 (file)
index 0000000..5d51165
--- /dev/null
@@ -0,0 +1,10 @@
+{
+    "subjects": [
+        "admin"
+    ],
+    "objects": [
+        "intra_extension",
+        "mapping",
+        "inter_extension"
+    ]
+}
\ No newline at end of file
diff --git a/keystone-moon/examples/pki/certs/cacert.pem b/keystone-moon/examples/pki/certs/cacert.pem
new file mode 100644 (file)
index 0000000..2f31d12
--- /dev/null
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIID1jCCAr6gAwIBAgIJAKiIU3dYUGKeMA0GCSqGSIb3DQEBBQUAMIGeMQowCAYD
+VQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55
+dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMG
+CSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2Vs
+ZiBTaWduZWQwIBcNMTMwNzA5MTYyNTAwWhgPMjA3MjAxMDExNjI1MDBaMIGeMQow
+CAYDVQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1
+bm55dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTEl
+MCMGCSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxML
+U2VsZiBTaWduZWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCh1U+N
+3g2cjFi7GeVf21FIv8MDhughFCey9rysAuqFONSFYo2rectLgpDtVy4BFFUFlxmh
+8Ci9TEZ5LiA31tbc4584GxvlLt4dg8aFsUJRBKq0L9i7W5v9uFpHrY1Zr+P4vwG+
+v7IWOuzw19f517eGpp6LLcj2vrpN9Yb63rrydKOqr0KJodMd+vFKmi+euFcPqs6s
+w1OiC5DpJN479CGl2Fs1WzMoKDedRNiXG7ysrVrYQIkfMBABBPIwilq1xXZz9Ybo
+0PbNgOu6xpSsy9hq+IzxcwYsr5CwIcbqW6Ju+Ti2iBEaff20lW7dFzO4kwrcqOr9
+Jnn7qE8YfJo9Hyj3AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN
+AQEFBQADggEBAGWFTQTe2FwvwGWa/Bx3Ypc8pJ05ucmGDm8XZiUHj1mOvFHTcveL
+Iofb+vR2lynr+MwF9Dn1szGteVNn/QxrHJIoxsgf1n/9fdyYqjoKWXblNBMt5jhr
+IlMGdQMqHSDzlkZKbcXg5vzHnG5mrwh0rojcZItZznXTSo/XnujEtHwIvCo6rk9c
+tRRzpkcDkg+/SZf2izchsLoEQVsJsIZMnWl0hUGFHaDfx2JQn7bnAcC84wPVhRJ+
+Xa3kDok1r7Nd7Vr/Wf0hCNRxyv2dySD/bq5iCEl1HNik3KCq4eUicTtkGe5N+Was
+ucf1RhPD3oZbxlTX4QDN7grSCdrTESyuhfc=
+-----END CERTIFICATE-----
diff --git a/keystone-moon/examples/pki/certs/middleware.pem b/keystone-moon/examples/pki/certs/middleware.pem
new file mode 100644 (file)
index 0000000..6546753
--- /dev/null
@@ -0,0 +1,50 @@
+-----BEGIN CERTIFICATE-----
+MIIDpjCCAo4CARAwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK
+EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr
+ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x
+MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgZAxCzAJBgNVBAYTAlVTMQsw
+CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh
+Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv
+cGVuc3RhY2sub3JnMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQC5dpW18l3bs+Mcj/JdhaAa+qw1RJwShm06g+q38ZoC
+cCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4GSI1pZa3iqbT9Yj70nxN+0l94iym+
+v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6BdmwS0FuOy2qfKPnPhyBDH2VawtOgY
+MLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69KBJQElFXPQ9Nu0ABCPWWC2tN87L5
+pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQuRnkMvQ/g887Sp6nEJ22ABPEFhuRr
+89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cTnV9Dv6bfAgMBAAEwDQYJKoZIhvcN
+AQEFBQADggEBAIVz3ZwxSUF/y5ABmjnVIQaVVxH97bu07smFQUe0AB2I9R4xnBJ9
+jn93DpeixZvArCZuDuJEJvNER8S6L3r/OPMPrVzayxibXATaZRE8khMWEJpsnyeW
+8paA5NuZJwN2NjlPOmT47J1m7ZjLgkrVwjhwQZPMnh5kG9690TBJNhg9x3Z8f6p3
+iKj2AfZWGhp9Xr2xOZCpfvAZmyvKOMeuHVrRZ2VWGuzojQd7fjSEDw/+Tg8Gw1LV
+BQXjXiKQHsD1YID2a9Pe9yrBjO00ZMxMw8+wN9qrh+8vxfmwTO8tEkmcpvM4ivO3
+/oGGhQh6nSncERVI7rx+wBDnIHKBz6MU2Ow=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5dpW18l3bs+Mc
+j/JdhaAa+qw1RJwShm06g+q38ZoCcCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4G
+SI1pZa3iqbT9Yj70nxN+0l94iym+v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6Bd
+mwS0FuOy2qfKPnPhyBDH2VawtOgYMLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69
+KBJQElFXPQ9Nu0ABCPWWC2tN87L5pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQu
+RnkMvQ/g887Sp6nEJ22ABPEFhuRr89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cT
+nV9Dv6bfAgMBAAECggEBAIB1K5L/kZUITulMptGyKUgmkjq/D98g7u0Vy/CmTkcc
+Cx6F+LGsL9D8mfplDBKOpo4S530sfKk1+Uwu2ovDGqKhazQJ5ZMnz6gK7Ieg1ERD
+wDDURTIeyKf0HtJMGD0av2QU+GIeYXQEO446PhLCu+n42zkQ8tDS8xSJbCsu0odV
+ok6+i7nEg9sP4uDfAAtM8CUJbRpFTha+m2a7pOz3ylU7/ZV4FDIgJ+FEynaphXAo
+bZE4MX5I7A4DDBp7/9g9HsgefByY4xiABuk7Rsyztyf2TrJEtcsVhiV4sCIIHsow
+u60KGEcTQWj4npBIMgW1QUdrwmAAh/35gOjt9ZndgTkCgYEA2yT5DmihjVaNF65B
+8VtdFcpESr8rr6FBmJ7z31m7MufeV1Inc5GqCK9agRmpr5sTYcgFB9it2IhW2WsA
+xHv+7J04bd9DBtgTv58GWrISsCR/abMZnJrm+F5Rafk77jwjCx/SwFj79ybI83Ia
+VJYMd7jqkxc00+DZT/3QWZqRrlsCgYEA2KeBBqUVdCpwNiJpgFM18HWjJx36HRk7
+YoFapXot/6R6A/rYmS+/goBZt2CWqqGtnXqWEZvH+v4L+WlUmYQrWwtoxpdR1oXz
+EmlCxN7D9MbRVR7QVW24h5zdwPOlbCTGoKzowOs8UEjMfQ81zoMinLmcJgHQSyzs
+OawgSF+DmM0CgYBQz26EELNaMktvKxQoE3/c9CyAv8Q1TKqqxBq8BxPP7s7/tkzU
+AigIcdlW+Aapue7IxQCN5yocShJ0tE+hJPRZfpR7d7P4xx9pLxQhx766c4sEiEXu
+iPSZK/artHuUG1r01DRcN7QabJP3qeDpxjcswuTFfu49H5IjPD5jfGsyNwKBgFjh
+bvdQ5lo/xsUOnQV+HZTGTeaQT7l8TnZ85rkYRKKp0TysvgsqIYDiMuwd/fGGXnlK
+fyI+LG51pmftpD1OkZLKPXOrRHGjhjK5aCDn2rAimGI5P/KsDpXj7r1ntyeEdtAX
+32y1lIrDMtDjWomcFqkBJGQbPl540Xhfeub1+EDJAoGAUZGPT2itKnxEFsa1SKHW
+yLeEsag/a9imAVyizo1WJn2WJaUhi1aHK49w6JRowIAzXXb7zLQt7BL8v+ydPVw3
+eySpXGqFuN/Prm3So0SeWllWcPsKFAzjgE0CWjNuB0GlAZGOaJOcWUNoOZjX/SDC
+FpolIoaSad28tGc8tbEk3fU=
+-----END PRIVATE KEY-----
diff --git a/keystone-moon/examples/pki/certs/signing_cert.pem b/keystone-moon/examples/pki/certs/signing_cert.pem
new file mode 100644 (file)
index 0000000..3129e50
--- /dev/null
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDpTCCAo0CAREwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK
+EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr
+ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x
+MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgY8xCzAJBgNVBAYTAlVTMQsw
+CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh
+Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv
+cGVuc3RhY2sub3JnMREwDwYDVQQDEwhLZXlzdG9uZTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMTC6IdNd9Cg1DshcrT5gRVRF36nEmjSA9QWdik7B925
+PK70U4F6j4pz/5JL7plIo/8rJ4jJz9ccE7m0iA+IuABtEhEwXkG9rj47Oy0J4ZyD
+GSh2K1Bl78PA9zxXSzysUTSjBKdAh29dPYbJY7cgZJ0uC3AtfVceYiAOIi14SdFe
+Z0LZLDXBuLaqUmSMrmKwJ9wAMOCb/jbBP9/3Ycd0GYjlvrSBU4Bqb8/NHasyO4Dp
+PN68OAoyD5r5jUtV8QZN03UjIsoux8e0lrL6+MVtJo0OfWvlSrlzS5HKSryY+uqq
+QEuxtZKpJM2MV85ujvjc8eDSChh2shhDjBem3FIlHKUCAwEAATANBgkqhkiG9w0B
+AQUFAAOCAQEAed9fHgdJrk+gZcO5gsqq6uURfDOuYD66GsSdZw4BqHjYAcnyWq2d
+a+iw7Uxkqu7iLf2k4+Hu3xjDFrce479OwZkSnbXmqB7XspTGOuM8MgT7jB/ypKTO
+Z6qaZKSWK1Hta995hMrVVlhUNBLh0MPGqoVWYA4d7mblujgH9vp+4mpCciJagHks
+8K5FBmI+pobB+uFdSYDoRzX9LTpStspK4e3IoY8baILuGcdKimRNBv6ItG4hMrnt
+Ae1/nWMJyUu5rDTGf2V/vAaS0S/faJBwQSz1o38QHMTWHNspfwIdX3yMqI9u7/vY
+lz3rLy5WdBdUgZrZ3/VLmJTiJVZu5Owq4Q==
+-----END CERTIFICATE-----
diff --git a/keystone-moon/examples/pki/certs/ssl_cert.pem b/keystone-moon/examples/pki/certs/ssl_cert.pem
new file mode 100644 (file)
index 0000000..0b0877e
--- /dev/null
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDpjCCAo4CARAwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV
+BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK
+EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr
+ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x
+MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgZAxCzAJBgNVBAYTAlVTMQsw
+CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh
+Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv
+cGVuc3RhY2sub3JnMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQC5dpW18l3bs+Mcj/JdhaAa+qw1RJwShm06g+q38ZoC
+cCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4GSI1pZa3iqbT9Yj70nxN+0l94iym+
+v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6BdmwS0FuOy2qfKPnPhyBDH2VawtOgY
+MLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69KBJQElFXPQ9Nu0ABCPWWC2tN87L5
+pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQuRnkMvQ/g887Sp6nEJ22ABPEFhuRr
+89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cTnV9Dv6bfAgMBAAEwDQYJKoZIhvcN
+AQEFBQADggEBAIVz3ZwxSUF/y5ABmjnVIQaVVxH97bu07smFQUe0AB2I9R4xnBJ9
+jn93DpeixZvArCZuDuJEJvNER8S6L3r/OPMPrVzayxibXATaZRE8khMWEJpsnyeW
+8paA5NuZJwN2NjlPOmT47J1m7ZjLgkrVwjhwQZPMnh5kG9690TBJNhg9x3Z8f6p3
+iKj2AfZWGhp9Xr2xOZCpfvAZmyvKOMeuHVrRZ2VWGuzojQd7fjSEDw/+Tg8Gw1LV
+BQXjXiKQHsD1YID2a9Pe9yrBjO00ZMxMw8+wN9qrh+8vxfmwTO8tEkmcpvM4ivO3
+/oGGhQh6nSncERVI7rx+wBDnIHKBz6MU2Ow=
+-----END CERTIFICATE-----
diff --git a/keystone-moon/examples/pki/cms/auth_token_revoked.json b/keystone-moon/examples/pki/cms/auth_token_revoked.json
new file mode 100644 (file)
index 0000000..57d3528
--- /dev/null
@@ -0,0 +1,85 @@
+{
+    "access": {
+        "serviceCatalog": [
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a",
+                        "region": "RegionOne",
+                        "internalURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a",
+                        "publicURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a"
+                    }
+                ],
+                "endpoints_links": [],
+                "type": "volume",
+                "name": "volume"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://127.0.0.1:9292/v1",
+                        "region": "RegionOne",
+                        "internalURL": "http://127.0.0.1:9292/v1",
+                        "publicURL": "http://127.0.0.1:9292/v1"
+                    }
+                ],
+                "endpoints_links": [],
+                "type": "image",
+                "name": "glance"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a",
+                        "region": "RegionOne",
+                        "internalURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a",
+                        "publicURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a"
+                    }
+                ],
+                "endpoints_links": [],
+                "type": "compute",
+                "name": "nova"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://127.0.0.1:35357/v2.0",
+                        "region": "RegionOne",
+                        "internalURL": "http://127.0.0.1:35357/v2.0",
+                        "publicURL": "http://127.0.0.1:5000/v2.0"
+                    }
+                ],
+                "endpoints_links": [],
+                "type": "identity",
+                "name": "keystone"
+            }
+        ],
+        "token": {
+            "expires": "2012-06-02T14:47:34Z",
+            "id": "placeholder",
+            "tenant": {
+                "enabled": true,
+                "description": null,
+                "name": "tenant_name1",
+                "id": "tenant_id1"
+            }
+        },
+        "user": {
+            "username": "revoked_username1",
+            "roles_links": [
+                "role1",
+                "role2"
+            ],
+            "id": "revoked_user_id1",
+            "roles": [
+                {
+                    "name": "role1"
+                },
+                {
+                    "name": "role2"
+                }
+            ],
+            "name": "revoked_username1"
+        }
+    }
+}
diff --git a/keystone-moon/examples/pki/cms/auth_token_revoked.pem b/keystone-moon/examples/pki/cms/auth_token_revoked.pem
new file mode 100644 (file)
index 0000000..1435c1e
--- /dev/null
@@ -0,0 +1,44 @@
+-----BEGIN CMS-----
+MIIH1wYJKoZIhvcNAQcCoIIHyDCCB8QCAQExCTAHBgUrDgMCGjCCBeQGCSqGSIb3
+DQEHAaCCBdUEggXReyJhY2Nlc3MiOiB7InNlcnZpY2VDYXRhbG9nIjogW3siZW5k
+cG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo4Nzc2L3Yx
+LzY0YjZmM2ZiY2M1MzQzNWU4YTYwZmNmODliYjY2MTdhIiwgInJlZ2lvbiI6ICJy
+ZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo4Nzc2
+L3YxLzY0YjZmM2ZiY2M1MzQzNWU4YTYwZmNmODliYjY2MTdhIiwgInB1YmxpY1VS
+TCI6ICJodHRwOi8vMTI3LjAuMC4xOjg3NzYvdjEvNjRiNmYzZmJjYzUzNDM1ZThh
+NjBmY2Y4OWJiNjYxN2EifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUi
+OiAidm9sdW1lIiwgIm5hbWUiOiAidm9sdW1lIn0sIHsiZW5kcG9pbnRzIjogW3si
+YWRtaW5VUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo5MjkyL3YxIiwgInJlZ2lvbiI6
+ICJyZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo5
+MjkyL3YxIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTI3LjAuMC4xOjkyOTIvdjEi
+fV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiaW1hZ2UiLCAibmFt
+ZSI6ICJnbGFuY2UifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRw
+Oi8vMTI3LjAuMC4xOjg3NzQvdjEuMS82NGI2ZjNmYmNjNTM0MzVlOGE2MGZjZjg5
+YmI2NjE3YSIsICJyZWdpb24iOiAicmVnaW9uT25lIiwgImludGVybmFsVVJMIjog
+Imh0dHA6Ly8xMjcuMC4wLjE6ODc3NC92MS4xLzY0YjZmM2ZiY2M1MzQzNWU4YTYw
+ZmNmODliYjY2MTdhIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTI3LjAuMC4xOjg3
+NzQvdjEuMS82NGI2ZjNmYmNjNTM0MzVlOGE2MGZjZjg5YmI2NjE3YSJ9XSwgImVu
+ZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAi
+bm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xMjcu
+MC4wLjE6MzUzNTcvdjIuMCIsICJyZWdpb24iOiAiUmVnaW9uT25lIiwgImludGVy
+bmFsVVJMIjogImh0dHA6Ly8xMjcuMC4wLjE6MzUzNTcvdjIuMCIsICJwdWJsaWNV
+UkwiOiAiaHR0cDovLzEyNy4wLjAuMTo1MDAwL3YyLjAifV0sICJlbmRwb2ludHNf
+bGlua3MiOiBbXSwgInR5cGUiOiAiaWRlbnRpdHkiLCAibmFtZSI6ICJrZXlzdG9u
+ZSJ9XSwidG9rZW4iOiB7ImV4cGlyZXMiOiAiMjAxMi0wNi0wMlQxNDo0NzozNFoi
+LCAiaWQiOiAicGxhY2Vob2xkZXIiLCAidGVuYW50IjogeyJlbmFibGVkIjogdHJ1
+ZSwgImRlc2NyaXB0aW9uIjogbnVsbCwgIm5hbWUiOiAidGVuYW50X25hbWUxIiwg
+ImlkIjogInRlbmFudF9pZDEifX0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJyZXZv
+a2VkX3VzZXJuYW1lMSIsICJyb2xlc19saW5rcyI6IFsicm9sZTEiLCJyb2xlMiJd
+LCAiaWQiOiAicmV2b2tlZF91c2VyX2lkMSIsICJyb2xlcyI6IFt7Im5hbWUiOiAi
+cm9sZTEifSwgeyJuYW1lIjogInJvbGUyIn1dLCAibmFtZSI6ICJyZXZva2VkX3Vz
+ZXJuYW1lMSJ9fX0NCjGCAcowggHGAgEBMIGkMIGeMQowCAYDVQQFEwE1MQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55dmFsZTESMBAGA1UE
+ChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMGCSqGSIb3DQEJARYW
+a2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2VsZiBTaWduZWQCAREw
+BwYFKw4DAhowDQYJKoZIhvcNAQEBBQAEggEAXY8JvllpyctcNlJByPLxhgLyRfFo
+Ew+8Yq3O4FxOyfVkINvOz4EHTipY0M/K8OLwfxpRt7o/iGLGRDBTI6Dd+erXsus8
+NecnNxcWN9RUE2CZhoGj/0nhnNEGF+9Mlv3tMBngwoUJg2paSw/Vn2Q7RaqbOC05
+aZOSDoSX7Zf0DIS/T0ZPnmOUb9+N25M20ctMHksPMEq0qyf2oove0O+WMa/cA8JT
+c2EAhew4WSD0Zv0GOAP30GS+hkNfA1GZTrvCQrpRs9jXhK4dR2bBsnUFVix1BEZ0
+sDhI8cXLvm16IpOO8ov6002ZoZhPn6Qo+0J8QOfdnjiwNnxLOEbuOIwPeQ==
+-----END CMS-----
diff --git a/keystone-moon/examples/pki/cms/auth_token_scoped.json b/keystone-moon/examples/pki/cms/auth_token_scoped.json
new file mode 100644 (file)
index 0000000..31b1044
--- /dev/null
@@ -0,0 +1,85 @@
+{
+    "access": {
+        "serviceCatalog": [
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a",
+                        "region": "RegionOne",
+                        "internalURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a",
+                        "publicURL": "http://127.0.0.1:8776/v1/64b6f3fbcc53435e8a60fcf89bb6617a"
+                    }
+                ],
+                "endpoints_links": [],
+                "type": "volume",
+                "name": "volume"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://127.0.0.1:9292/v1",
+                        "region": "RegionOne",
+                        "internalURL": "http://127.0.0.1:9292/v1",
+                        "publicURL": "http://127.0.0.1:9292/v1"
+                    }
+                ],
+                "endpoints_links": [],
+                "type": "image",
+                "name": "glance"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a",
+                        "region": "RegionOne",
+                        "internalURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a",
+                        "publicURL": "http://127.0.0.1:8774/v1.1/64b6f3fbcc53435e8a60fcf89bb6617a"
+                    }
+                ],
+                "endpoints_links": [],
+                "type": "compute",
+                "name": "nova"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://127.0.0.1:35357/v2.0",
+                        "region": "RegionOne",
+                        "internalURL": "http://127.0.0.1:35357/v2.0",
+                        "publicURL": "http://127.0.0.1:5000/v2.0"
+                    }
+                ],
+                "endpoints_links": [],
+                "type": "identity",
+                "name": "keystone"
+            }
+        ],
+        "token": {
+            "expires": "2012-06-02T14:47:34Z",
+            "id": "placeholder",
+            "tenant": {
+                "enabled": true,
+                "description": null,
+                "name": "tenant_name1",
+                "id": "tenant_id1"
+            }
+        },
+        "user": {
+            "username": "user_name1",
+            "roles_links": [
+                "role1",
+                "role2"
+            ],
+            "id": "user_id1",
+            "roles": [
+                {
+                    "name": "role1"
+                },
+                {
+                    "name": "role2"
+                }
+            ],
+            "name": "user_name1"
+        }
+    }
+}
diff --git a/keystone-moon/examples/pki/cms/auth_token_scoped.pem b/keystone-moon/examples/pki/cms/auth_token_scoped.pem
new file mode 100644 (file)
index 0000000..5c02c95
--- /dev/null
@@ -0,0 +1,44 @@
+-----BEGIN CMS-----
+MIIHwQYJKoZIhvcNAQcCoIIHsjCCB64CAQExCTAHBgUrDgMCGjCCBc4GCSqGSIb3
+DQEHAaCCBb8EggW7eyJhY2Nlc3MiOiB7InNlcnZpY2VDYXRhbG9nIjogW3siZW5k
+cG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo4Nzc2L3Yx
+LzY0YjZmM2ZiY2M1MzQzNWU4YTYwZmNmODliYjY2MTdhIiwgInJlZ2lvbiI6ICJy
+ZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo4Nzc2
+L3YxLzY0YjZmM2ZiY2M1MzQzNWU4YTYwZmNmODliYjY2MTdhIiwgInB1YmxpY1VS
+TCI6ICJodHRwOi8vMTI3LjAuMC4xOjg3NzYvdjEvNjRiNmYzZmJjYzUzNDM1ZThh
+NjBmY2Y4OWJiNjYxN2EifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUi
+OiAidm9sdW1lIiwgIm5hbWUiOiAidm9sdW1lIn0sIHsiZW5kcG9pbnRzIjogW3si
+YWRtaW5VUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo5MjkyL3YxIiwgInJlZ2lvbiI6
+ICJyZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzEyNy4wLjAuMTo5
+MjkyL3YxIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTI3LjAuMC4xOjkyOTIvdjEi
+fV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAiaW1hZ2UiLCAibmFt
+ZSI6ICJnbGFuY2UifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRw
+Oi8vMTI3LjAuMC4xOjg3NzQvdjEuMS82NGI2ZjNmYmNjNTM0MzVlOGE2MGZjZjg5
+YmI2NjE3YSIsICJyZWdpb24iOiAicmVnaW9uT25lIiwgImludGVybmFsVVJMIjog
+Imh0dHA6Ly8xMjcuMC4wLjE6ODc3NC92MS4xLzY0YjZmM2ZiY2M1MzQzNWU4YTYw
+ZmNmODliYjY2MTdhIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTI3LjAuMC4xOjg3
+NzQvdjEuMS82NGI2ZjNmYmNjNTM0MzVlOGE2MGZjZjg5YmI2NjE3YSJ9XSwgImVu
+ZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAi
+bm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xMjcu
+MC4wLjE6MzUzNTcvdjIuMCIsICJyZWdpb24iOiAiUmVnaW9uT25lIiwgImludGVy
+bmFsVVJMIjogImh0dHA6Ly8xMjcuMC4wLjE6MzUzNTcvdjIuMCIsICJwdWJsaWNV
+UkwiOiAiaHR0cDovLzEyNy4wLjAuMTo1MDAwL3YyLjAifV0sICJlbmRwb2ludHNf
+bGlua3MiOiBbXSwgInR5cGUiOiAiaWRlbnRpdHkiLCAibmFtZSI6ICJrZXlzdG9u
+ZSJ9XSwidG9rZW4iOiB7ImV4cGlyZXMiOiAiMjAxMi0wNi0wMlQxNDo0NzozNFoi
+LCAiaWQiOiAicGxhY2Vob2xkZXIiLCAidGVuYW50IjogeyJlbmFibGVkIjogdHJ1
+ZSwgImRlc2NyaXB0aW9uIjogbnVsbCwgIm5hbWUiOiAidGVuYW50X25hbWUxIiwg
+ImlkIjogInRlbmFudF9pZDEifX0sICJ1c2VyIjogeyJ1c2VybmFtZSI6ICJ1c2Vy
+X25hbWUxIiwgInJvbGVzX2xpbmtzIjogWyJyb2xlMSIsInJvbGUyIl0sICJpZCI6
+ICJ1c2VyX2lkMSIsICJyb2xlcyI6IFt7Im5hbWUiOiAicm9sZTEifSwgeyJuYW1l
+IjogInJvbGUyIn1dLCAibmFtZSI6ICJ1c2VyX25hbWUxIn19fQ0KMYIByjCCAcYC
+AQEwgaQwgZ4xCjAIBgNVBAUTATUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTES
+MBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sxETAPBgNVBAsT
+CEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVuc3RhY2sub3Jn
+MRQwEgYDVQQDEwtTZWxmIFNpZ25lZAIBETAHBgUrDgMCGjANBgkqhkiG9w0BAQEF
+AASCAQCAtuVtqTU9h1uaRrYU1eusSnHwD6jizp/xltTrYTyFPfYjhJdglS+bjSeS
+Iau9pN3Tfug98ozUTJ5ByNepAQtxBxPz5bDXhBmAbU6ywaolqRAG+b/s2ShNGQ2a
+tn80NeZmDNbtoqdHVAkD3EZXjsEKr2w+3JTTF2indzczyGe5EeSfNUaT+ZhNEmPR
+Urob62t8atW+zehCSurpaa8pC5m1NcbK8Uu6Y+qO2m08KU9w5kmbOQtWAGCmtpIx
+F2yM1AbSgd90yzen7dv5mNkgZyzQ6SYgRUvkKOKnCyBb97EZK3ZR4qUxQzRYM++8
+g8HdaIfoYVPoPHqODet8Xmhw/Wtp
+-----END CMS-----
diff --git a/keystone-moon/examples/pki/cms/auth_token_unscoped.json b/keystone-moon/examples/pki/cms/auth_token_unscoped.json
new file mode 100644 (file)
index 0000000..5c6d1f8
--- /dev/null
@@ -0,0 +1,23 @@
+{
+    "access": {
+        "token": {
+            "expires": "2012-08-17T15:35:34Z",
+            "id": "01e032c996ef4406b144335915a41e79"
+        },
+        "serviceCatalog": {},
+        "user": {
+            "username": "user_name1",
+            "roles_links": [],
+            "id": "c9c89e3be3ee453fbf00c7966f6d3fbd",
+            "roles": [
+                {
+                    "name": "role1"
+                },
+                {
+                    "name": "role2"
+                }
+            ],
+            "name": "user_name1"
+        }
+    }
+}
diff --git a/keystone-moon/examples/pki/cms/auth_token_unscoped.pem b/keystone-moon/examples/pki/cms/auth_token_unscoped.pem
new file mode 100644 (file)
index 0000000..6064909
--- /dev/null
@@ -0,0 +1,19 @@
+-----BEGIN CMS-----
+MIIDKAYJKoZIhvcNAQcCoIIDGTCCAxUCAQExCTAHBgUrDgMCGjCCATUGCSqGSIb3
+DQEHAaCCASYEggEieyJhY2Nlc3MiOiB7InRva2VuIjogeyJleHBpcmVzIjogIjIw
+MTItMDgtMTdUMTU6MzU6MzRaIiwgImlkIjogIjAxZTAzMmM5OTZlZjQ0MDZiMTQ0
+MzM1OTE1YTQxZTc5In0sICJzZXJ2aWNlQ2F0YWxvZyI6IHt9LCAidXNlciI6IHsi
+dXNlcm5hbWUiOiAidXNlcl9uYW1lMSIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQi
+OiAiYzljODllM2JlM2VlNDUzZmJmMDBjNzk2NmY2ZDNmYmQiLCAicm9sZXMiOiBb
+eyduYW1lJzogJ3JvbGUxJ30seyduYW1lJzogJ3JvbGUyJ30sXSwgIm5hbWUiOiAi
+dXNlcl9uYW1lMSJ9fX0xggHKMIIBxgIBATCBpDCBnjEKMAgGA1UEBRMBNTELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTdW5ueXZhbGUxEjAQBgNV
+BAoTCU9wZW5TdGFjazERMA8GA1UECxMIS2V5c3RvbmUxJTAjBgkqhkiG9w0BCQEW
+FmtleXN0b25lQG9wZW5zdGFjay5vcmcxFDASBgNVBAMTC1NlbGYgU2lnbmVkAgER
+MAcGBSsOAwIaMA0GCSqGSIb3DQEBAQUABIIBAFyD9IH2bXsafCTyHEWS28zBuq03
+ZNWXV4+0BfdMbX1ONkaQ7mLGRmfabLHwfE5RaSASFh/Doq7KTc8XrBVfTm9HQPGr
+TLZUawdYlyBFVq0PEE1cPvO9Blz4X/2Awcp/Q67YRd/oLCY2dFWMClMroXu1fy3P
+oFlpWPPhURrbU1GjhUgPIz0IxNGjfWEHVsb5kz7Bo4E8J3pgIkccm97XZZtiCwf7
+DVNj+Eb5mRegGG6IgSSRpZULgnCmSofQ3RnW3jSCkDxLXDQm9IsaaLJsuUFLylGs
+mB/98w9mP192IGl5MVr8/tANXwb5ok2VatUp/Ww1U0IlWbhN374PbK76vcE=
+-----END CMS-----
diff --git a/keystone-moon/examples/pki/cms/revocation_list.json b/keystone-moon/examples/pki/cms/revocation_list.json
new file mode 100644 (file)
index 0000000..9ad9728
--- /dev/null
@@ -0,0 +1,8 @@
+{
+    "revoked": [
+        {
+            "id": "7acfcfdaf6a14aebe97c61c5947bc4d3",
+            "expires": "2012-08-14T17:58:48Z"
+        }
+    ]
+}
diff --git a/keystone-moon/examples/pki/cms/revocation_list.pem b/keystone-moon/examples/pki/cms/revocation_list.pem
new file mode 100644 (file)
index 0000000..bd22d3f
--- /dev/null
@@ -0,0 +1,15 @@
+-----BEGIN CMS-----
+MIICWgYJKoZIhvcNAQcCoIICSzCCAkcCAQExCTAHBgUrDgMCGjBpBgkqhkiG9w0B
+BwGgXARaeyJyZXZva2VkIjpbeyJpZCI6IjdhY2ZjZmRhZjZhMTRhZWJlOTdjNjFj
+NTk0N2JjNGQzIiwiZXhwaXJlcyI6IjIwMTItMDgtMTRUMTc6NTg6NDhaIn1dfQ0K
+MYIByjCCAcYCAQEwgaQwgZ4xCjAIBgNVBAUTATUxCzAJBgNVBAYTAlVTMQswCQYD
+VQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sx
+ETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVu
+c3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZAIBETAHBgUrDgMCGjANBgkq
+hkiG9w0BAQEFAASCAQC2f05VHM7zjNT3TBO80AmZ00n7AEWUjbFe5nqIM8kWGM83
+01Bi3uU/nQ0daAd3tqCmDL2EfETAjD+xnIzjlN6eIA74Vy51wFD/KiyWYPWzw8mH
+WcATHmE4E8kLdt8NhUodCY9TCFxcHJNDR1Eai/U7hH+5O4p9HcmMjv/GWegZL6HB
+Up9Cxu6haxvPFmYylzM6Qt0Ad/WiO/JZLPTA4qXJEJSa9EMFMb0c2wSDSn30swJe
+7J79VTFktTr2djv8KFvaHr4vLFYv2Y3ZkTeHqam0m91vllxLZJUP5QTSHjjY6LFE
+5eEjIlOv9wOOm1uTtPIq6pxCugU1Wm7gstkqr55R
+-----END CMS-----
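
The .pem files under examples/pki/cms are CMS signatures over the neighbouring
.json fixtures, produced by gen_pki.sh below with -nodetach -nocerts -noattr.
A sketch of verifying one outside Keystone with the openssl CLI (paths are
relative to examples/pki; the flag set mirrors what python-keystoneclient
passes, but treat it as an assumption):

    import subprocess

    def cms_verify(pem_path, signing_cert, ca_cert):
        # Prints the signed JSON payload on success; raises
        # CalledProcessError if the signature or trust chain is bad.
        return subprocess.check_output(
            ['openssl', 'cms', '-verify', '-in', pem_path, '-inform', 'PEM',
             '-certfile', signing_cert, '-CAfile', ca_cert,
             '-nosmimecap', '-nodetach', '-nocerts', '-noattr'])

    print(cms_verify('cms/revocation_list.pem',
                     'certs/signing_cert.pem', 'certs/cacert.pem'))
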
diff --git a/keystone-moon/examples/pki/gen_pki.sh b/keystone-moon/examples/pki/gen_pki.sh
new file mode 100755 (executable)
index 0000000..6555026
--- /dev/null
@@ -0,0 +1,221 @@
+#!/bin/bash
+
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This script generates the crypto necessary for the SSL tests.
+
+DIR=`dirname "$0"`
+CURRENT_DIR=`cd "$DIR" && pwd`
+CERTS_DIR=$CURRENT_DIR/certs
+PRIVATE_DIR=$CURRENT_DIR/private
+CMS_DIR=$CURRENT_DIR/cms
+
+
+function rm_old {
+    rm -rf $CERTS_DIR/*.pem
+    rm -rf $PRIVATE_DIR/*.pem
+}
+
+function cleanup {
+    rm -rf *.conf > /dev/null 2>&1
+    rm -rf index* > /dev/null 2>&1
+    rm -rf *.crt > /dev/null 2>&1
+    rm -rf newcerts > /dev/null 2>&1
+    rm -rf *.pem > /dev/null 2>&1
+    rm -rf serial* > /dev/null 2>&1
+}
+
+function generate_ca_conf {
+    echo '
+[ req ]
+default_bits            = 2048
+default_keyfile         = cakey.pem
+default_md              = default
+
+prompt                  = no
+distinguished_name      = ca_distinguished_name
+
+x509_extensions         = ca_extensions
+
+[ ca_distinguished_name ]
+serialNumber            = 5
+countryName             = US
+stateOrProvinceName     = CA
+localityName            = Sunnyvale
+organizationName        = OpenStack
+organizationalUnitName  = Keystone
+emailAddress            = keystone@openstack.org
+commonName              = Self Signed
+
+[ ca_extensions ]
+basicConstraints        = critical,CA:true
+' > ca.conf
+}
+
+function generate_ssl_req_conf {
+    echo '
+[ req ]
+default_bits            = 2048
+default_keyfile         = keystonekey.pem
+default_md              = default
+
+prompt                  = no
+distinguished_name      = distinguished_name
+
+[ distinguished_name ]
+countryName             = US
+stateOrProvinceName     = CA
+localityName            = Sunnyvale
+organizationName        = OpenStack
+organizationalUnitName  = Keystone
+commonName              = localhost
+emailAddress            = keystone@openstack.org
+' > ssl_req.conf
+}
+
+function generate_cms_signing_req_conf {
+    echo '
+[ req ]
+default_bits            = 2048
+default_keyfile         = keystonekey.pem
+default_md              = default
+
+prompt                  = no
+distinguished_name      = distinguished_name
+
+[ distinguished_name ]
+countryName             = US
+stateOrProvinceName     = CA
+localityName            = Sunnyvale
+organizationName        = OpenStack
+organizationalUnitName  = Keystone
+commonName              = Keystone
+emailAddress            = keystone@openstack.org
+' > cms_signing_req.conf
+}
+
+function generate_signing_conf {
+    echo '
+[ ca ]
+default_ca      = signing_ca
+
+[ signing_ca ]
+dir             = .
+database        = $dir/index.txt
+new_certs_dir   = $dir/newcerts
+
+certificate     = $dir/certs/cacert.pem
+serial          = $dir/serial
+private_key     = $dir/private/cakey.pem
+
+default_days            = 21360
+default_crl_days        = 30
+default_md              = default
+
+policy                  = policy_any
+
+[ policy_any ]
+countryName             = supplied
+stateOrProvinceName     = supplied
+localityName            = optional
+organizationName        = supplied
+organizationalUnitName  = supplied
+emailAddress            = supplied
+commonName              = supplied
+' > signing.conf
+}
+
+function setup {
+    touch index.txt
+    echo '10' > serial
+    generate_ca_conf
+    mkdir newcerts
+}
+
+function check_error {
+    if [ $1 != 0 ] ; then
+        echo "Failed! rc=${1}"
+        echo 'Bailing ...'
+        cleanup
+        exit $1
+    else
+        echo 'Done'
+    fi
+}
+
+function generate_ca {
+    echo 'Generating New CA Certificate ...'
+    openssl req -x509 -newkey rsa:2048 -days 21360 -out $CERTS_DIR/cacert.pem -keyout $PRIVATE_DIR/cakey.pem -outform PEM -config ca.conf -nodes
+    check_error $?
+}
+
+function ssl_cert_req {
+    echo 'Generating SSL Certificate Request ...'
+    generate_ssl_req_conf
+    openssl req -newkey rsa:2048 -keyout $PRIVATE_DIR/ssl_key.pem -keyform PEM -out ssl_req.pem -outform PEM -config ssl_req.conf -nodes
+    check_error $?
+    #openssl req -in req.pem -text -noout
+}
+
+function cms_signing_cert_req {
+    echo 'Generating CMS Signing Certificate Request ...'
+    generate_cms_signing_req_conf
+    openssl req -newkey rsa:2048 -keyout $PRIVATE_DIR/signing_key.pem -keyform PEM -out cms_signing_req.pem -outform PEM -config cms_signing_req.conf -nodes
+    check_error $?
+    #openssl req -in req.pem -text -noout
+}
+
+function issue_certs {
+    generate_signing_conf
+    echo 'Issuing SSL Certificate ...'
+    openssl ca -in ssl_req.pem -config signing.conf -batch
+    check_error $?
+    openssl x509 -in $CURRENT_DIR/newcerts/10.pem -out $CERTS_DIR/ssl_cert.pem
+    check_error $?
+    echo 'Issuing CMS Signing Certificate ...'
+    openssl ca -in cms_signing_req.pem -config signing.conf -batch
+    check_error $?
+    openssl x509 -in $CURRENT_DIR/newcerts/11.pem -out $CERTS_DIR/signing_cert.pem
+    check_error $?
+}
+
+function create_middleware_cert {
+    cp $CERTS_DIR/ssl_cert.pem $CERTS_DIR/middleware.pem
+    cat $PRIVATE_DIR/ssl_key.pem >> $CERTS_DIR/middleware.pem
+}
+
+function check_openssl {
+    echo 'Checking openssl availability ...'
+    which openssl
+    check_error $?
+}
+
+function gen_sample_cms {
+    for json_file in "${CMS_DIR}/auth_token_revoked.json" "${CMS_DIR}/auth_token_unscoped.json" "${CMS_DIR}/auth_token_scoped.json" "${CMS_DIR}/revocation_list.json"; do
+        openssl cms -sign -in $json_file -nosmimecap -signer $CERTS_DIR/signing_cert.pem -inkey $PRIVATE_DIR/signing_key.pem -outform PEM -nodetach -nocerts -noattr -out ${json_file/.json/.pem}
+    done
+}
+
+check_openssl
+rm_old
+cleanup
+setup
+generate_ca
+ssl_cert_req
+cms_signing_cert_req
+issue_certs
+create_middleware_cert
+gen_sample_cms
+cleanup
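
The script is meant to be run from examples/pki and regenerates everything
under certs/, private/ and cms/. If a single fixture needs re-signing, the
gen_sample_cms invocation can be reproduced directly; a sketch using the same
flags as the script:

    import subprocess

    # Re-sign one fixture exactly as gen_sample_cms does (run from examples/pki).
    subprocess.check_call(
        ['openssl', 'cms', '-sign', '-in', 'cms/revocation_list.json',
         '-nosmimecap', '-signer', 'certs/signing_cert.pem',
         '-inkey', 'private/signing_key.pem', '-outform', 'PEM',
         '-nodetach', '-nocerts', '-noattr',
         '-out', 'cms/revocation_list.pem'])
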
diff --git a/keystone-moon/examples/pki/private/cakey.pem b/keystone-moon/examples/pki/private/cakey.pem
new file mode 100644 (file)
index 0000000..86ff4cf
--- /dev/null
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCh1U+N3g2cjFi7
+GeVf21FIv8MDhughFCey9rysAuqFONSFYo2rectLgpDtVy4BFFUFlxmh8Ci9TEZ5
+LiA31tbc4584GxvlLt4dg8aFsUJRBKq0L9i7W5v9uFpHrY1Zr+P4vwG+v7IWOuzw
+19f517eGpp6LLcj2vrpN9Yb63rrydKOqr0KJodMd+vFKmi+euFcPqs6sw1OiC5Dp
+JN479CGl2Fs1WzMoKDedRNiXG7ysrVrYQIkfMBABBPIwilq1xXZz9Ybo0PbNgOu6
+xpSsy9hq+IzxcwYsr5CwIcbqW6Ju+Ti2iBEaff20lW7dFzO4kwrcqOr9Jnn7qE8Y
+fJo9Hyj3AgMBAAECggEAPeEVaTaF190mNGyDczKmEv4X8CpOag+N2nVT0SXQTJ5d
+TJ9RckbAwB+tkMLr+Uev9tI+39e3jCI1NDK56QAB6jYy9D4RXYGdNoXji80qgVYa
+e4lsAr/Vlp8+DfhDew6xSbSnUytzSeLAJJsznvmn2Bmvt6ILHKXzEMoYEabGrtvk
+0n31mmd6sszW6i1cYEhr3gK/VXaO4gM1oWit9aeIJDg3/D3UNUW7aoCTeCz91Gif
+87/JH3UIPEIt960jb3oV7ltajRSpiSOfefJFwz/2n09+/P/Sg1+SWAraqkqaLqhO
+zoslYSYUuOQv+j97iD/tDVBjiWR1TrzQjf/3noOl+QKBgQDTExaIe0YYI8KdBNZ6
+1cG3vztNWDh0PaP1n0n/bJYAGmAfxfn/gSrABXfeIAjy01f76EK2lPa/i8+DR7vL
+dJnUMO10OxaIZKr+OtR1XrMM6kREj6H5yHTNz0sJ3hDEfwJ1BndqwrXlCLAe7upe
+veXI9LVfPjPVmf8t9UwyxtaNiwKBgQDERzCGEuyKIeSfgytcdknJ0W+AbdkshC92
+tZQPbI35YOLac2/y7GMjjf5Xg5VJRIYwXAG8ha+61Tvd7+qCVdzNyYfyOoBEE69B
+Gc9UdpXRfIjxokfidqh7mIIfjFNSI/UyVmvL9wrregXPcM+s7OlLC/0O82gOcNxU
+GKF3oP5XxQKBgQCPZEZIjcZ+m7yYQzMZ26FwnL9Cug4QGdgLAx2YIkJ8624l568A
+ftV2AcD+67Boll8NSSoZM3W1htuAifjwLNRcLKkD7yhNnGX1tC2lVqI4weWC1jjp
+od6H+q01lOC7PLWEntH9ey1q3M4ZFaGunz89l9CnVXCNScLri9sqG56iJQKBgHOc
+50UiInhe7HbU4ZauClq5Za9FhRXGqtqGrDbFn38UBavdMUTq3p6Txgwwcp/coBoe
+J9uu90razU+2QPESuGPy4IPa17DB04pKNKiwzSC+9T83cpY/hJCAzazdkDqi+Yv0
+Abz7wE/h6Ug+T+WxCt3sqtvCnjlbWzyh4YJAr3BtAoGBAIibPCEfVOwOfMOXkhIb
+liRVVGNxXQa6MwGVVfyR9gmlM85IjcBjh+Tf5+v3Mo286OlzLXQjfYW5pXR5Mgaw
+bKe+z5AqJlOsA+lJGTyCNnPKwaXAYHt8dZ41WhgzekibHCx7EQ+8jH1jkz2Gwou6
+MDbnRu+e0FCyRFSuhB9Cim/K
+-----END PRIVATE KEY-----
diff --git a/keystone-moon/examples/pki/private/signing_key.pem b/keystone-moon/examples/pki/private/signing_key.pem
new file mode 100644 (file)
index 0000000..acf8476
--- /dev/null
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDEwuiHTXfQoNQ7
+IXK0+YEVURd+pxJo0gPUFnYpOwfduTyu9FOBeo+Kc/+SS+6ZSKP/KyeIyc/XHBO5
+tIgPiLgAbRIRMF5Bva4+OzstCeGcgxkoditQZe/DwPc8V0s8rFE0owSnQIdvXT2G
+yWO3IGSdLgtwLX1XHmIgDiIteEnRXmdC2Sw1wbi2qlJkjK5isCfcADDgm/42wT/f
+92HHdBmI5b60gVOAam/PzR2rMjuA6TzevDgKMg+a+Y1LVfEGTdN1IyLKLsfHtJay
++vjFbSaNDn1r5Uq5c0uRykq8mPrqqkBLsbWSqSTNjFfObo743PHg0goYdrIYQ4wX
+ptxSJRylAgMBAAECggEBAIDQPVz/CXarI+ZGQotaYPisqx3+kN3QyDLcNaVOgRrW
+P3UmfVjh/QEeae3ECkONu9e8z9gMjyX7uqo0F3NcBWI6Bb79FGgjnuQc8OPOeUZ2
+yUyk+DxdT/eu5+04FQh2o387TjuU0lXFDBem1sI30cbZMyHQliMnwAPOXO+5tVH8
+PusGNBMVvoCyfnj52uVjmAjPqLXyOMcKEhuJFbhnUURKvzkHRf43SWQsb081eh2m
+ACQ7uNzX7vg3aPXxSZXY2+hHX67POdqosjddu6CfoXcEHAOAUujvTOFvd1gGRkRo
+uOi5hNQqcN5uaqeq9enVThINDyFMzngZBhMCzRTWeK0CgYEA4qUhB7lJZLt9niDW
+4Fudda1Pzu3XfxHsSG4D+xx5LunKb3ChG5x7PSLJvusfvnkm5fqhEEhbSVARo6Vn
+AAA52u5SPDDNwyk1ttvBR/Fc7eGwpbRQry2I6ui6baKiIOSV2K3vJlsSK8/GMQqu
+j0fstJuSvQR7Y6NUYxlWi+VNussCgYEA3j7tFAdGFc5JkeTHSzsU4h2+17uVDSSi
+yr7Duc9+9fwAbsO4go9x1CAOvV2r0WX10jPsTGg1d31pWLvJrS6QsAffmM+A0QIT
+eBX+umcavXWy69VExWa0xKU9wTE/nQvX9Fr8A+Klh/WfMcvoomK2zgOKoRSmes04
+WKYlHWsSaE8CgYBUYcZ6abG5n1SVmwRlY7asKWqdUE/7L2EZVlyFEYTMwp5r/zL8
+ZLY9fMZAHqoi8FhbJ4Tv2wChuv3WP66pgWwI5tIXNtRk5OLqwcakUmiW6IAsMYYY
+sotXam5+gx55wKFJmvh+/0k0ppbTi3aSQeUPGRz44sJNxnGUs8pVK3pVIQKBgQDD
+ga+lEtEAlbv6b7sx3wN79pbPyOBR84yRtkcPygzx74Gh7uL9V5rW9GyDAUgIqR0a
+kTqp7HI8b0KhIHFFu9TkRcjY8JFtS9o8pXy0FcdcK5H+DFq3HKag5ovwy5YeXTDY
+cMGJ2XOsqtIkSDCZySTvDgaBtVzOYoHS2jWEL5C92QKBgGmL2juXIB+HAi7UuKPg
+nWkVTikt5Zr2GNgYtso75E7+ljaRuf4D9eEBiOD1qYKQm8KvsiVzEs71BSmT1p1C
+b2hlM/5Crb7KumIkHTARQFr5NPwuBZ6NA6RLnd++vKi0WgOJtDAlR3bgwugfQdzZ
+4Isaq9Rgfa/EHCKB2weQ7c3r
+-----END PRIVATE KEY-----
diff --git a/keystone-moon/examples/pki/private/ssl_key.pem b/keystone-moon/examples/pki/private/ssl_key.pem
new file mode 100644 (file)
index 0000000..e2e6837
--- /dev/null
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5dpW18l3bs+Mc
+j/JdhaAa+qw1RJwShm06g+q38ZoCcCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4G
+SI1pZa3iqbT9Yj70nxN+0l94iym+v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6Bd
+mwS0FuOy2qfKPnPhyBDH2VawtOgYMLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69
+KBJQElFXPQ9Nu0ABCPWWC2tN87L5pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQu
+RnkMvQ/g887Sp6nEJ22ABPEFhuRr89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cT
+nV9Dv6bfAgMBAAECggEBAIB1K5L/kZUITulMptGyKUgmkjq/D98g7u0Vy/CmTkcc
+Cx6F+LGsL9D8mfplDBKOpo4S530sfKk1+Uwu2ovDGqKhazQJ5ZMnz6gK7Ieg1ERD
+wDDURTIeyKf0HtJMGD0av2QU+GIeYXQEO446PhLCu+n42zkQ8tDS8xSJbCsu0odV
+ok6+i7nEg9sP4uDfAAtM8CUJbRpFTha+m2a7pOz3ylU7/ZV4FDIgJ+FEynaphXAo
+bZE4MX5I7A4DDBp7/9g9HsgefByY4xiABuk7Rsyztyf2TrJEtcsVhiV4sCIIHsow
+u60KGEcTQWj4npBIMgW1QUdrwmAAh/35gOjt9ZndgTkCgYEA2yT5DmihjVaNF65B
+8VtdFcpESr8rr6FBmJ7z31m7MufeV1Inc5GqCK9agRmpr5sTYcgFB9it2IhW2WsA
+xHv+7J04bd9DBtgTv58GWrISsCR/abMZnJrm+F5Rafk77jwjCx/SwFj79ybI83Ia
+VJYMd7jqkxc00+DZT/3QWZqRrlsCgYEA2KeBBqUVdCpwNiJpgFM18HWjJx36HRk7
+YoFapXot/6R6A/rYmS+/goBZt2CWqqGtnXqWEZvH+v4L+WlUmYQrWwtoxpdR1oXz
+EmlCxN7D9MbRVR7QVW24h5zdwPOlbCTGoKzowOs8UEjMfQ81zoMinLmcJgHQSyzs
+OawgSF+DmM0CgYBQz26EELNaMktvKxQoE3/c9CyAv8Q1TKqqxBq8BxPP7s7/tkzU
+AigIcdlW+Aapue7IxQCN5yocShJ0tE+hJPRZfpR7d7P4xx9pLxQhx766c4sEiEXu
+iPSZK/artHuUG1r01DRcN7QabJP3qeDpxjcswuTFfu49H5IjPD5jfGsyNwKBgFjh
+bvdQ5lo/xsUOnQV+HZTGTeaQT7l8TnZ85rkYRKKp0TysvgsqIYDiMuwd/fGGXnlK
+fyI+LG51pmftpD1OkZLKPXOrRHGjhjK5aCDn2rAimGI5P/KsDpXj7r1ntyeEdtAX
+32y1lIrDMtDjWomcFqkBJGQbPl540Xhfeub1+EDJAoGAUZGPT2itKnxEFsa1SKHW
+yLeEsag/a9imAVyizo1WJn2WJaUhi1aHK49w6JRowIAzXXb7zLQt7BL8v+ydPVw3
+eySpXGqFuN/Prm3So0SeWllWcPsKFAzjgE0CWjNuB0GlAZGOaJOcWUNoOZjX/SDC
+FpolIoaSad28tGc8tbEk3fU=
+-----END PRIVATE KEY-----
diff --git a/keystone-moon/httpd/README b/keystone-moon/httpd/README
new file mode 100644 (file)
index 0000000..c4f5a80
--- /dev/null
@@ -0,0 +1,2 @@
+Documentation on how to set up Keystone to run with Apache HTTPD is in
+doc/source/apache-httpd.rst
diff --git a/keystone-moon/httpd/keystone.py b/keystone-moon/httpd/keystone.py
new file mode 100644 (file)
index 0000000..0c7018f
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+
+from keystone.server import wsgi as wsgi_server
+
+
+name = os.path.basename(__file__)
+
+# NOTE(ldbragst): 'application' is required in this context by WSGI spec.
+# The following is a reference to Python Paste Deploy documentation
+# http://pythonpaste.org/deploy/
+application = wsgi_server.initialize_application(name)
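
os.path.basename(__file__) makes the deployed filename select the paste
pipeline, which is why wsgi-keystone.conf below points WSGIScriptAlias at
files named main and admin. A sketch of the corresponding install step (the
target paths are the ones assumed by the sample Apache config, not mandated by
Keystone itself):

    import shutil

    # Install the same module under both names; basename(__file__) then
    # resolves to 'main' or 'admin' when mod_wsgi imports it.
    for app in ('main', 'admin'):
        shutil.copy('keystone-moon/httpd/keystone.py',
                    '/var/www/cgi-bin/keystone/%s' % app)
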
diff --git a/keystone-moon/httpd/wsgi-keystone.conf b/keystone-moon/httpd/wsgi-keystone.conf
new file mode 100644 (file)
index 0000000..f191818
--- /dev/null
@@ -0,0 +1,28 @@
+Listen 5000
+Listen 35357
+
+<VirtualHost *:5000>
+    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone display-name=%{GROUP}
+    WSGIProcessGroup keystone-public
+    WSGIScriptAlias / /var/www/cgi-bin/keystone/main
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/keystone.log
+    CustomLog /var/log/apache2/keystone_access.log combined
+</VirtualHost>
+
+<VirtualHost *:35357>
+    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone display-name=%{GROUP}
+    WSGIProcessGroup keystone-admin
+    WSGIScriptAlias / /var/www/cgi-bin/keystone/admin
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/apache2/keystone.log
+    CustomLog /var/log/apache2/keystone_access.log combined
+</VirtualHost>
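
With the two virtual hosts enabled, the public API listens on port 5000 and
the admin API on 35357, each served by its own mod_wsgi daemon group. A quick
smoke test once Apache is reloaded (assumes a local install; /v2.0/ answers
with a version document):

    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen         # Python 2

    for port in (5000, 35357):
        # Both endpoints should return the v2.0 version document.
        print(urlopen('http://localhost:%d/v2.0/' % port).read())
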
diff --git a/keystone-moon/keystone/__init__.py b/keystone-moon/keystone/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/assignment/__init__.py b/keystone-moon/keystone/assignment/__init__.py
new file mode 100644 (file)
index 0000000..49ad759
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.assignment import controllers  # noqa
+from keystone.assignment.core import *  # noqa
+from keystone.assignment import routers  # noqa
diff --git a/keystone-moon/keystone/assignment/backends/__init__.py b/keystone-moon/keystone/assignment/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/assignment/backends/ldap.py b/keystone-moon/keystone/assignment/backends/ldap.py
new file mode 100644 (file)
index 0000000..f93e989
--- /dev/null
@@ -0,0 +1,531 @@
+# Copyright 2012-2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from __future__ import absolute_import
+
+import ldap as ldap
+import ldap.filter
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone import assignment
+from keystone.assignment.role_backends import ldap as ldap_role
+from keystone.common import ldap as common_ldap
+from keystone.common import models
+from keystone import exception
+from keystone.i18n import _
+from keystone.identity.backends import ldap as ldap_identity
+from keystone.openstack.common import versionutils
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class Assignment(assignment.Driver):
+    @versionutils.deprecated(
+        versionutils.deprecated.KILO,
+        remove_in=+2,
+        what='keystone.assignment.backends.ldap.Assignment')
+    def __init__(self):
+        super(Assignment, self).__init__()
+        self.LDAP_URL = CONF.ldap.url
+        self.LDAP_USER = CONF.ldap.user
+        self.LDAP_PASSWORD = CONF.ldap.password
+        self.suffix = CONF.ldap.suffix
+
+        # This is the only deep dependency from assignment back to identity.
+        # This is safe to do since if you are using LDAP for assignment, it is
+        # required that you are using it for identity as well.
+        self.user = ldap_identity.UserApi(CONF)
+        self.group = ldap_identity.GroupApi(CONF)
+
+        self.project = ProjectApi(CONF)
+        self.role = RoleApi(CONF, self.user)
+
+    def default_role_driver(self):
+        return 'keystone.assignment.role_backends.ldap.Role'
+
+    def default_resource_driver(self):
+        return 'keystone.resource.backends.ldap.Resource'
+
+    def list_role_ids_for_groups_on_project(
+            self, groups, project_id, project_domain_id, project_parents):
+        group_dns = [self.group._id_to_dn(group_id) for group_id in groups]
+        role_list = [self.role._dn_to_id(role_assignment.role_dn)
+                     for role_assignment in self.role.get_role_assignments
+                     (self.project._id_to_dn(project_id))
+                     if role_assignment.user_dn.upper() in group_dns]
+        # NOTE(morganfainberg): Does not support OS-INHERIT as domain
+        # metadata/roles are not supported by LDAP backend. Skip OS-INHERIT
+        # logic.
+        return role_list
+
+    def _get_metadata(self, user_id=None, tenant_id=None,
+                      domain_id=None, group_id=None):
+
+        def _get_roles_for_just_user_and_project(user_id, tenant_id):
+            user_dn = self.user._id_to_dn(user_id)
+            return [self.role._dn_to_id(a.role_dn)
+                    for a in self.role.get_role_assignments
+                    (self.project._id_to_dn(tenant_id))
+                    if common_ldap.is_dn_equal(a.user_dn, user_dn)]
+
+        def _get_roles_for_group_and_project(group_id, project_id):
+            group_dn = self.group._id_to_dn(group_id)
+            return [self.role._dn_to_id(a.role_dn)
+                    for a in self.role.get_role_assignments
+                    (self.project._id_to_dn(project_id))
+                    if common_ldap.is_dn_equal(a.user_dn, group_dn)]
+
+        if domain_id is not None:
+            msg = _('Domain metadata not supported by LDAP')
+            raise exception.NotImplemented(message=msg)
+        if group_id is None and user_id is None:
+            return {}
+
+        if tenant_id is None:
+            return {}
+        if user_id is None:
+            metadata_ref = _get_roles_for_group_and_project(group_id,
+                                                            tenant_id)
+        else:
+            metadata_ref = _get_roles_for_just_user_and_project(user_id,
+                                                                tenant_id)
+        if not metadata_ref:
+            return {}
+        return {'roles': [self._role_to_dict(r, False) for r in metadata_ref]}
+
+    def list_project_ids_for_user(self, user_id, group_ids, hints,
+                                  inherited=False):
+        # TODO(henry-nash): The ldap driver does not support inherited
+        # assignments, so the inherited parameter is unused.
+        # See bug #1404273.
+        user_dn = self.user._id_to_dn(user_id)
+        associations = (self.role.list_project_roles_for_user
+                        (user_dn, self.project.tree_dn))
+
+        for group_id in group_ids:
+            group_dn = self.group._id_to_dn(group_id)
+            for group_role in self.role.list_project_roles_for_group(
+                    group_dn, self.project.tree_dn):
+                associations.append(group_role)
+
+        return list(set(
+            [self.project._dn_to_id(x.project_dn) for x in associations]))
+
+    def list_role_ids_for_groups_on_domain(self, group_ids, domain_id):
+        raise exception.NotImplemented()
+
+    def list_project_ids_for_groups(self, group_ids, hints,
+                                    inherited=False):
+        raise exception.NotImplemented()
+
+    def list_domain_ids_for_user(self, user_id, group_ids, hints):
+        raise exception.NotImplemented()
+
+    def list_domain_ids_for_groups(self, group_ids, inherited=False):
+        raise exception.NotImplemented()
+
+    def list_user_ids_for_project(self, tenant_id):
+        tenant_dn = self.project._id_to_dn(tenant_id)
+        rolegrants = self.role.get_role_assignments(tenant_dn)
+        return [self.user._dn_to_id(user_dn) for user_dn in
+                self.project.get_user_dns(tenant_id, rolegrants)]
+
+    def _subrole_id_to_dn(self, role_id, tenant_id):
+        if tenant_id is None:
+            return self.role._id_to_dn(role_id)
+        else:
+            return '%s=%s,%s' % (self.role.id_attr,
+                                 ldap.dn.escape_dn_chars(role_id),
+                                 self.project._id_to_dn(tenant_id))
+
+    def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
+        user_dn = self.user._id_to_dn(user_id)
+        role_dn = self._subrole_id_to_dn(role_id, tenant_id)
+        self.role.add_user(role_id, role_dn, user_dn, user_id, tenant_id)
+        tenant_dn = self.project._id_to_dn(tenant_id)
+        return UserRoleAssociation(role_dn=role_dn,
+                                   user_dn=user_dn,
+                                   tenant_dn=tenant_dn)
+
+    def _add_role_to_group_and_project(self, group_id, tenant_id, role_id):
+        group_dn = self.group._id_to_dn(group_id)
+        role_dn = self._subrole_id_to_dn(role_id, tenant_id)
+        self.role.add_user(role_id, role_dn, group_dn, group_id, tenant_id)
+        tenant_dn = self.project._id_to_dn(tenant_id)
+        return GroupRoleAssociation(group_dn=group_dn,
+                                    role_dn=role_dn,
+                                    tenant_dn=tenant_dn)
+
+    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
+        role_dn = self._subrole_id_to_dn(role_id, tenant_id)
+        return self.role.delete_user(role_dn,
+                                     self.user._id_to_dn(user_id), role_id)
+
+    def _remove_role_from_group_and_project(self, group_id, tenant_id,
+                                            role_id):
+        role_dn = self._subrole_id_to_dn(role_id, tenant_id)
+        return self.role.delete_user(role_dn,
+                                     self.group._id_to_dn(group_id), role_id)
+
+# Bulk actions on users, driven from the identity backend
+    def delete_user(self, user_id):
+        user_dn = self.user._id_to_dn(user_id)
+        for ref in self.role.list_global_roles_for_user(user_dn):
+            self.role.delete_user(ref.role_dn, ref.user_dn,
+                                  self.role._dn_to_id(ref.role_dn))
+        for ref in self.role.list_project_roles_for_user(user_dn,
+                                                         self.project.tree_dn):
+            self.role.delete_user(ref.role_dn, ref.user_dn,
+                                  self.role._dn_to_id(ref.role_dn))
+
+    def delete_group(self, group_id):
+        """Called when the group was deleted.
+
+        Any role assignments for the group should be cleaned up.
+
+        """
+        group_dn = self.group._id_to_dn(group_id)
+        group_role_assignments = self.role.list_project_roles_for_group(
+            group_dn, self.project.tree_dn)
+        for ref in group_role_assignments:
+            self.role.delete_user(ref.role_dn, ref.group_dn,
+                                  self.role._dn_to_id(ref.role_dn))
+
+    def create_grant(self, role_id, user_id=None, group_id=None,
+                     domain_id=None, project_id=None,
+                     inherited_to_projects=False):
+
+        try:
+            metadata_ref = self._get_metadata(user_id, project_id,
+                                              domain_id, group_id)
+        except exception.MetadataNotFound:
+            metadata_ref = {}
+
+        if user_id is None:
+            metadata_ref['roles'] = self._add_role_to_group_and_project(
+                group_id, project_id, role_id)
+        else:
+            metadata_ref['roles'] = self.add_role_to_user_and_project(
+                user_id, project_id, role_id)
+
+    def check_grant_role_id(self, role_id, user_id=None, group_id=None,
+                            domain_id=None, project_id=None,
+                            inherited_to_projects=False):
+
+        try:
+            metadata_ref = self._get_metadata(user_id, project_id,
+                                              domain_id, group_id)
+        except exception.MetadataNotFound:
+            metadata_ref = {}
+        role_ids = set(self._roles_from_role_dicts(
+            metadata_ref.get('roles', []), inherited_to_projects))
+        if role_id not in role_ids:
+            actor_id = user_id or group_id
+            target_id = domain_id or project_id
+            raise exception.RoleAssignmentNotFound(role_id=role_id,
+                                                   actor_id=actor_id,
+                                                   target_id=target_id)
+
+    def delete_grant(self, role_id, user_id=None, group_id=None,
+                     domain_id=None, project_id=None,
+                     inherited_to_projects=False):
+
+        try:
+            metadata_ref = self._get_metadata(user_id, project_id,
+                                              domain_id, group_id)
+        except exception.MetadataNotFound:
+            metadata_ref = {}
+
+        try:
+            if user_id is None:
+                metadata_ref['roles'] = (
+                    self._remove_role_from_group_and_project(
+                        group_id, project_id, role_id))
+            else:
+                metadata_ref['roles'] = self.remove_role_from_user_and_project(
+                    user_id, project_id, role_id)
+        except (exception.RoleNotFound, KeyError):
+            actor_id = user_id or group_id
+            target_id = domain_id or project_id
+            raise exception.RoleAssignmentNotFound(role_id=role_id,
+                                                   actor_id=actor_id,
+                                                   target_id=target_id)
+
+    def list_grant_role_ids(self, user_id=None, group_id=None,
+                            domain_id=None, project_id=None,
+                            inherited_to_projects=False):
+
+        try:
+            metadata_ref = self._get_metadata(user_id, project_id,
+                                              domain_id, group_id)
+        except exception.MetadataNotFound:
+            metadata_ref = {}
+
+        return self._roles_from_role_dicts(metadata_ref.get('roles', []),
+                                           inherited_to_projects)
+
+    def list_role_assignments(self):
+        role_assignments = []
+        for a in self.role.list_role_assignments(self.project.tree_dn):
+            if isinstance(a, UserRoleAssociation):
+                assignment = {
+                    'role_id': self.role._dn_to_id(a.role_dn),
+                    'user_id': self.user._dn_to_id(a.user_dn),
+                    'project_id': self.project._dn_to_id(a.project_dn)}
+            else:
+                assignment = {
+                    'role_id': self.role._dn_to_id(a.role_dn),
+                    'group_id': self.group._dn_to_id(a.group_dn),
+                    'project_id': self.project._dn_to_id(a.project_dn)}
+            role_assignments.append(assignment)
+        return role_assignments
+
+    def delete_project_assignments(self, project_id):
+        tenant_dn = self.project._id_to_dn(project_id)
+        self.role.roles_delete_subtree_by_project(tenant_dn)
+
+    def delete_role_assignments(self, role_id):
+        self.role.roles_delete_subtree_by_role(role_id, self.project.tree_dn)
+
+
+# TODO(termie): turn this into a data object and move logic to driver
+class ProjectApi(common_ldap.ProjectLdapStructureMixin,
+                 common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap):
+
+    model = models.Project
+
+    def __init__(self, conf):
+        super(ProjectApi, self).__init__(conf)
+        self.member_attribute = (conf.ldap.project_member_attribute
+                                 or self.DEFAULT_MEMBER_ATTRIBUTE)
+
+    def get_user_projects(self, user_dn, associations):
+        """Returns list of tenants a user has access to
+        """
+
+        project_ids = set()
+        for assoc in associations:
+            project_ids.add(self._dn_to_id(assoc.project_dn))
+        projects = []
+        for project_id in project_ids:
+            # Slower to get them one at a time, but a huge list could blow
+            # out the connection. This is the safer way.
+            projects.append(self.get(project_id))
+        return projects
+
+    def get_user_dns(self, tenant_id, rolegrants, role_dn=None):
+        tenant = self._ldap_get(tenant_id)
+        res = set()
+        if not role_dn:
+            # Get users who have default tenant mapping
+            for user_dn in tenant[1].get(self.member_attribute, []):
+                if self._is_dumb_member(user_dn):
+                    continue
+                res.add(user_dn)
+
+        # Get users who are explicitly mapped via a tenant
+        for rolegrant in rolegrants:
+            if role_dn is None or rolegrant.role_dn == role_dn:
+                res.add(rolegrant.user_dn)
+        return list(res)
+
+
+class UserRoleAssociation(object):
+    """Role Grant model."""
+
+    def __init__(self, user_dn=None, role_dn=None, tenant_dn=None,
+                 *args, **kw):
+        self.user_dn = user_dn
+        self.role_dn = role_dn
+        self.project_dn = tenant_dn
+
+
+class GroupRoleAssociation(object):
+    """Role Grant model."""
+
+    def __init__(self, group_dn=None, role_dn=None, tenant_dn=None,
+                 *args, **kw):
+        self.group_dn = group_dn
+        self.role_dn = role_dn
+        self.project_dn = tenant_dn
+
+
+# TODO(termie): turn this into a data object and move logic to driver
+# NOTE(henry-nash): The RoleLdapStructureMixin class enables the sharing of
+# the LDAP structure between here and the role LDAP backend; no methods are
+# shared.
+class RoleApi(ldap_role.RoleLdapStructureMixin, common_ldap.BaseLdap):
+
+    def __init__(self, conf, user_api):
+        super(RoleApi, self).__init__(conf)
+        self.member_attribute = (conf.ldap.role_member_attribute
+                                 or self.DEFAULT_MEMBER_ATTRIBUTE)
+        self._user_api = user_api
+
+    def add_user(self, role_id, role_dn, user_dn, user_id, tenant_id=None):
+        try:
+            super(RoleApi, self).add_member(user_dn, role_dn)
+        except exception.Conflict:
+            msg = (_('User %(user_id)s already has role %(role_id)s in '
+                     'tenant %(tenant_id)s') %
+                   dict(user_id=user_id, role_id=role_id, tenant_id=tenant_id))
+            raise exception.Conflict(type='role grant', details=msg)
+        except self.NotFound:
+            if tenant_id is None or self.get(role_id) is None:
+                raise Exception(_("Role %s not found") % (role_id,))
+
+            attrs = [('objectClass', [self.object_class]),
+                     (self.member_attribute, [user_dn]),
+                     (self.id_attr, [role_id])]
+
+            if self.use_dumb_member:
+                attrs[1][1].append(self.dumb_member)
+            with self.get_connection() as conn:
+                conn.add_s(role_dn, attrs)
+
+    def delete_user(self, role_dn, user_dn, role_id):
+        try:
+            super(RoleApi, self).remove_member(user_dn, role_dn)
+        except (self.NotFound, ldap.NO_SUCH_ATTRIBUTE):
+            raise exception.RoleNotFound(message=_(
+                'Cannot remove role that has not been granted, %s') %
+                role_id)
+
+    def get_role_assignments(self, tenant_dn):
+        try:
+            roles = self._ldap_get_list(tenant_dn, ldap.SCOPE_ONELEVEL,
+                                        attrlist=[self.member_attribute])
+        except ldap.NO_SUCH_OBJECT:
+            roles = []
+        res = []
+        for role_dn, attrs in roles:
+            try:
+                user_dns = attrs[self.member_attribute]
+            except KeyError:
+                continue
+            for user_dn in user_dns:
+                if self._is_dumb_member(user_dn):
+                    continue
+                res.append(UserRoleAssociation(
+                    user_dn=user_dn,
+                    role_dn=role_dn,
+                    tenant_dn=tenant_dn))
+
+        return res
+
+    def list_global_roles_for_user(self, user_dn):
+        user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
+        roles = self.get_all('(%s=%s)' % (self.member_attribute, user_dn_esc))
+        return [UserRoleAssociation(
+                role_dn=role.dn,
+                user_dn=user_dn) for role in roles]
+
+    def list_project_roles_for_user(self, user_dn, project_subtree):
+        try:
+            roles = self._ldap_get_list(project_subtree, ldap.SCOPE_SUBTREE,
+                                        query_params={
+                                            self.member_attribute: user_dn},
+                                        attrlist=common_ldap.DN_ONLY)
+        except ldap.NO_SUCH_OBJECT:
+            roles = []
+        res = []
+        for role_dn, _role_attrs in roles:
+            # ldap.dn.str2dn returns a list, where the first
+            # element is the first RDN.
+            # For a role assignment, this contains the role ID;
+            # the remainder is the DN of the tenant.
+            # role_dn is already utf8 encoded since it came from LDAP.
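+            # For example (illustrative DNs):
+            #     role_dn:   'cn=admin,cn=proj1,ou=projects,dc=example,dc=com'
+            #     tenant_dn: 'cn=proj1,ou=projects,dc=example,dc=com'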
+            tenant = ldap.dn.str2dn(role_dn)
+            tenant.pop(0)
+            tenant_dn = ldap.dn.dn2str(tenant)
+            res.append(UserRoleAssociation(
+                user_dn=user_dn,
+                role_dn=role_dn,
+                tenant_dn=tenant_dn))
+        return res
+
+    def list_project_roles_for_group(self, group_dn, project_subtree):
+        group_dn_esc = ldap.filter.escape_filter_chars(group_dn)
+        query = '(&(objectClass=%s)(%s=%s))' % (self.object_class,
+                                                self.member_attribute,
+                                                group_dn_esc)
+        with self.get_connection() as conn:
+            try:
+                roles = conn.search_s(project_subtree,
+                                      ldap.SCOPE_SUBTREE,
+                                      query,
+                                      attrlist=common_ldap.DN_ONLY)
+            except ldap.NO_SUCH_OBJECT:
+                # Return no roles rather than raise an exception if the project
+                # subtree entry doesn't exist because an empty subtree is not
+                # an error.
+                return []
+
+        res = []
+        for role_dn, _role_attrs in roles:
+            # ldap.dn.str2dn returns a list, where the first
+            # element is the first RDN.
+            # For a role assignment, this contains the role ID,
+            # the remainder is the DN of the project.
+            # role_dn is already utf8 encoded since it came from LDAP.
+            project = ldap.dn.str2dn(role_dn)
+            project.pop(0)
+            project_dn = ldap.dn.dn2str(project)
+            res.append(GroupRoleAssociation(
+                group_dn=group_dn,
+                role_dn=role_dn,
+                tenant_dn=project_dn))
+        return res
+
+    def roles_delete_subtree_by_project(self, tenant_dn):
+        self._delete_tree_nodes(tenant_dn, ldap.SCOPE_ONELEVEL)
+
+    def roles_delete_subtree_by_role(self, role_id, tree_dn):
+        self._delete_tree_nodes(tree_dn, ldap.SCOPE_SUBTREE, query_params={
+            self.id_attr: role_id})
+
+    def list_role_assignments(self, project_tree_dn):
+        """Returns a list of all the role assignments linked to project_tree_dn
+        attribute.
+        """
+        try:
+            roles = self._ldap_get_list(project_tree_dn, ldap.SCOPE_SUBTREE,
+                                        attrlist=[self.member_attribute])
+        except ldap.NO_SUCH_OBJECT:
+            roles = []
+        res = []
+        for role_dn, role in roles:
+            # role_dn is already utf8 encoded since it came from LDAP.
+            tenant = ldap.dn.str2dn(role_dn)
+            tenant.pop(0)
+            # The remainder is the tenant DN, which is used below to
+            # construct the association object.
+            tenant_dn = ldap.dn.dn2str(tenant)
+            for occupant_dn in role[self.member_attribute]:
+                if self._is_dumb_member(occupant_dn):
+                    continue
+                if self._user_api.is_user(occupant_dn):
+                    association = UserRoleAssociation(
+                        user_dn=occupant_dn,
+                        role_dn=role_dn,
+                        tenant_dn=tenant_dn)
+                else:
+                    # occupant_dn is a group.
+                    association = GroupRoleAssociation(
+                        group_dn=occupant_dn,
+                        role_dn=role_dn,
+                        tenant_dn=tenant_dn)
+                res.append(association)
+        return res
diff --git a/keystone-moon/keystone/assignment/backends/sql.py b/keystone-moon/keystone/assignment/backends/sql.py
new file mode 100644 (file)
index 0000000..2de6ca6
--- /dev/null
@@ -0,0 +1,415 @@
+# Copyright 2012-13 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+import sqlalchemy
+from sqlalchemy.sql.expression import false
+
+from keystone import assignment as keystone_assignment
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class AssignmentType(object):
+    USER_PROJECT = 'UserProject'
+    GROUP_PROJECT = 'GroupProject'
+    USER_DOMAIN = 'UserDomain'
+    GROUP_DOMAIN = 'GroupDomain'
+
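+    # A quick sketch of the mapping below (illustrative values):
+    #     calculate_type('u1', None, 'p1', None) -> USER_PROJECT
+    #     calculate_type(None, 'g1', None, 'd1') -> GROUP_DOMAIN
+    # Any other combination raises AssignmentTypeCalculationError.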
+    @classmethod
+    def calculate_type(cls, user_id, group_id, project_id, domain_id):
+        if user_id:
+            if project_id:
+                return cls.USER_PROJECT
+            if domain_id:
+                return cls.USER_DOMAIN
+        if group_id:
+            if project_id:
+                return cls.GROUP_PROJECT
+            if domain_id:
+                return cls.GROUP_DOMAIN
+        # Invalid parameter combination.
+        raise exception.AssignmentTypeCalculationError(**locals())
+
+
+class Assignment(keystone_assignment.Driver):
+
+    def default_role_driver(self):
+        return "keystone.assignment.role_backends.sql.Role"
+
+    def default_resource_driver(self):
+        return 'keystone.resource.backends.sql.Resource'
+
+    def list_user_ids_for_project(self, tenant_id):
+        with sql.transaction() as session:
+            query = session.query(RoleAssignment.actor_id)
+            query = query.filter_by(type=AssignmentType.USER_PROJECT)
+            query = query.filter_by(target_id=tenant_id)
+            query = query.distinct('actor_id')
+            assignments = query.all()
+            return [assignment.actor_id for assignment in assignments]
+
+    def _get_metadata(self, user_id=None, tenant_id=None,
+                      domain_id=None, group_id=None, session=None):
+        # TODO(henry-nash): This method represents the last vestiges of the old
+        # metadata concept in this driver.  Although we no longer need it here,
+        # since the Manager layer uses the metadata concept across all
+        # assignment drivers, we need to remove it from all of them in order to
+        # finally remove this method.
+
+        # We aren't given a session when called by the manager directly.
+        if session is None:
+            session = sql.get_session()
+
+        q = session.query(RoleAssignment)
+
+        def _calc_assignment_type():
+            # Figure out the assignment type we're checking for from the args.
+            if user_id:
+                if tenant_id:
+                    return AssignmentType.USER_PROJECT
+                else:
+                    return AssignmentType.USER_DOMAIN
+            else:
+                if tenant_id:
+                    return AssignmentType.GROUP_PROJECT
+                else:
+                    return AssignmentType.GROUP_DOMAIN
+
+        q = q.filter_by(type=_calc_assignment_type())
+        q = q.filter_by(actor_id=user_id or group_id)
+        q = q.filter_by(target_id=tenant_id or domain_id)
+        refs = q.all()
+        if not refs:
+            raise exception.MetadataNotFound()
+
+        metadata_ref = {}
+        metadata_ref['roles'] = []
+        for assignment in refs:
+            role_ref = {}
+            role_ref['id'] = assignment.role_id
+            if assignment.inherited:
+                role_ref['inherited_to'] = 'projects'
+            metadata_ref['roles'].append(role_ref)
+
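+        # Shape of the returned structure (a sketch):
+        #     {'roles': [{'id': <role_id>},
+        #                {'id': <role_id>, 'inherited_to': 'projects'}]}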
+        return metadata_ref
+
+    def create_grant(self, role_id, user_id=None, group_id=None,
+                     domain_id=None, project_id=None,
+                     inherited_to_projects=False):
+
+        assignment_type = AssignmentType.calculate_type(
+            user_id, group_id, project_id, domain_id)
+        try:
+            with sql.transaction() as session:
+                session.add(RoleAssignment(
+                    type=assignment_type,
+                    actor_id=user_id or group_id,
+                    target_id=project_id or domain_id,
+                    role_id=role_id,
+                    inherited=inherited_to_projects))
+        except sql.DBDuplicateEntry:
+            # The v3 grant APIs are silent if the assignment already exists
+            pass
+
+    def list_grant_role_ids(self, user_id=None, group_id=None,
+                            domain_id=None, project_id=None,
+                            inherited_to_projects=False):
+        with sql.transaction() as session:
+            q = session.query(RoleAssignment.role_id)
+            q = q.filter(RoleAssignment.actor_id == (user_id or group_id))
+            q = q.filter(RoleAssignment.target_id == (project_id or domain_id))
+            q = q.filter(RoleAssignment.inherited == inherited_to_projects)
+            return [x.role_id for x in q.all()]
+
+    def _build_grant_filter(self, session, role_id, user_id, group_id,
+                            domain_id, project_id, inherited_to_projects):
+        q = session.query(RoleAssignment)
+        q = q.filter_by(actor_id=user_id or group_id)
+        q = q.filter_by(target_id=project_id or domain_id)
+        q = q.filter_by(role_id=role_id)
+        q = q.filter_by(inherited=inherited_to_projects)
+        return q
+
+    def check_grant_role_id(self, role_id, user_id=None, group_id=None,
+                            domain_id=None, project_id=None,
+                            inherited_to_projects=False):
+        with sql.transaction() as session:
+            try:
+                q = self._build_grant_filter(
+                    session, role_id, user_id, group_id, domain_id, project_id,
+                    inherited_to_projects)
+                q.one()
+            except sql.NotFound:
+                actor_id = user_id or group_id
+                target_id = domain_id or project_id
+                raise exception.RoleAssignmentNotFound(role_id=role_id,
+                                                       actor_id=actor_id,
+                                                       target_id=target_id)
+
+    def delete_grant(self, role_id, user_id=None, group_id=None,
+                     domain_id=None, project_id=None,
+                     inherited_to_projects=False):
+        with sql.transaction() as session:
+            q = self._build_grant_filter(
+                session, role_id, user_id, group_id, domain_id, project_id,
+                inherited_to_projects)
+            if not q.delete(False):
+                actor_id = user_id or group_id
+                target_id = domain_id or project_id
+                raise exception.RoleAssignmentNotFound(role_id=role_id,
+                                                       actor_id=actor_id,
+                                                       target_id=target_id)
+
+    def _list_project_ids_for_actor(self, actors, hints, inherited,
+                                    group_only=False):
+        # TODO(henry-nash): Now that we have a single assignment table, we
+        # should be able to honor the hints list that is provided.
+
+        assignment_type = [AssignmentType.GROUP_PROJECT]
+        if not group_only:
+            assignment_type.append(AssignmentType.USER_PROJECT)
+
+        sql_constraints = sqlalchemy.and_(
+            RoleAssignment.type.in_(assignment_type),
+            RoleAssignment.inherited == inherited,
+            RoleAssignment.actor_id.in_(actors))
+
+        with sql.transaction() as session:
+            query = session.query(RoleAssignment.target_id).filter(
+                sql_constraints).distinct()
+
+        return [x.target_id for x in query.all()]
+
+    def list_project_ids_for_user(self, user_id, group_ids, hints,
+                                  inherited=False):
+        actor_list = [user_id]
+        if group_ids:
+            actor_list = actor_list + group_ids
+
+        return self._list_project_ids_for_actor(actor_list, hints, inherited)
+
+    def list_domain_ids_for_user(self, user_id, group_ids, hints,
+                                 inherited=False):
+        with sql.transaction() as session:
+            query = session.query(RoleAssignment.target_id)
+            filters = []
+
+            if user_id:
+                sql_constraints = sqlalchemy.and_(
+                    RoleAssignment.actor_id == user_id,
+                    RoleAssignment.inherited == inherited,
+                    RoleAssignment.type == AssignmentType.USER_DOMAIN)
+                filters.append(sql_constraints)
+
+            if group_ids:
+                sql_constraints = sqlalchemy.and_(
+                    RoleAssignment.actor_id.in_(group_ids),
+                    RoleAssignment.inherited == inherited,
+                    RoleAssignment.type == AssignmentType.GROUP_DOMAIN)
+                filters.append(sql_constraints)
+
+            if not filters:
+                return []
+
+            query = query.filter(sqlalchemy.or_(*filters)).distinct()
+
+            return [assignment.target_id for assignment in query.all()]
+
+    def list_role_ids_for_groups_on_domain(self, group_ids, domain_id):
+        if not group_ids:
+            # If there are no groups then there will be no domain roles.
+            return []
+
+        sql_constraints = sqlalchemy.and_(
+            RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
+            RoleAssignment.target_id == domain_id,
+            RoleAssignment.inherited == false(),
+            RoleAssignment.actor_id.in_(group_ids))
+
+        with sql.transaction() as session:
+            query = session.query(RoleAssignment.role_id).filter(
+                sql_constraints).distinct()
+        return [role.role_id for role in query.all()]
+
+    def list_role_ids_for_groups_on_project(
+            self, group_ids, project_id, project_domain_id, project_parents):
+
+        if not group_ids:
+            # If there are no groups then there will be no project roles.
+            return []
+
+        # NOTE(rodrigods): First, we always include projects with
+        # non-inherited assignments
+        sql_constraints = sqlalchemy.and_(
+            RoleAssignment.type == AssignmentType.GROUP_PROJECT,
+            RoleAssignment.inherited == false(),
+            RoleAssignment.target_id == project_id)
+
+        if CONF.os_inherit.enabled:
+            # Inherited roles from domains
+            sql_constraints = sqlalchemy.or_(
+                sql_constraints,
+                sqlalchemy.and_(
+                    RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
+                    RoleAssignment.inherited,
+                    RoleAssignment.target_id == project_domain_id))
+
+            # Inherited roles from projects
+            if project_parents:
+                sql_constraints = sqlalchemy.or_(
+                    sql_constraints,
+                    sqlalchemy.and_(
+                        RoleAssignment.type == AssignmentType.GROUP_PROJECT,
+                        RoleAssignment.inherited,
+                        RoleAssignment.target_id.in_(project_parents)))
+
+        sql_constraints = sqlalchemy.and_(
+            sql_constraints, RoleAssignment.actor_id.in_(group_ids))
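+        # Net filter (a sketch, with os_inherit enabled): direct
+        # GROUP_PROJECT assignments on project_id, OR inherited
+        # GROUP_DOMAIN assignments on project_domain_id, OR inherited
+        # GROUP_PROJECT assignments on any project parent; all limited
+        # to actor_id IN group_ids.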
+
+        with sql.transaction() as session:
+            # NOTE(morganfainberg): Only select the columns we actually care
+            # about here, in this case role_id.
+            query = session.query(RoleAssignment.role_id).filter(
+                sql_constraints).distinct()
+
+        return [result.role_id for result in query.all()]
+
+    def list_project_ids_for_groups(self, group_ids, hints,
+                                    inherited=False):
+        return self._list_project_ids_for_actor(
+            group_ids, hints, inherited, group_only=True)
+
+    def list_domain_ids_for_groups(self, group_ids, inherited=False):
+        if not group_ids:
+            # If there are no groups then there will be no domains.
+            return []
+
+        group_sql_conditions = sqlalchemy.and_(
+            RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
+            RoleAssignment.inherited == inherited,
+            RoleAssignment.actor_id.in_(group_ids))
+
+        with sql.transaction() as session:
+            query = session.query(RoleAssignment.target_id).filter(
+                group_sql_conditions).distinct()
+        return [x.target_id for x in query.all()]
+
+    def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
+        try:
+            with sql.transaction() as session:
+                session.add(RoleAssignment(
+                    type=AssignmentType.USER_PROJECT,
+                    actor_id=user_id, target_id=tenant_id,
+                    role_id=role_id, inherited=False))
+        except sql.DBDuplicateEntry:
+            msg = ('User %s already has role %s in tenant %s'
+                   % (user_id, role_id, tenant_id))
+            raise exception.Conflict(type='role grant', details=msg)
+
+    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
+        with sql.transaction() as session:
+            q = session.query(RoleAssignment)
+            q = q.filter_by(actor_id=user_id)
+            q = q.filter_by(target_id=tenant_id)
+            q = q.filter_by(role_id=role_id)
+            if q.delete() == 0:
+                raise exception.RoleNotFound(message=_(
+                    'Cannot remove role that has not been granted, %s') %
+                    role_id)
+
+    def list_role_assignments(self):
+
+        def denormalize_role(ref):
+            assignment = {}
+            if ref.type == AssignmentType.USER_PROJECT:
+                assignment['user_id'] = ref.actor_id
+                assignment['project_id'] = ref.target_id
+            elif ref.type == AssignmentType.USER_DOMAIN:
+                assignment['user_id'] = ref.actor_id
+                assignment['domain_id'] = ref.target_id
+            elif ref.type == AssignmentType.GROUP_PROJECT:
+                assignment['group_id'] = ref.actor_id
+                assignment['project_id'] = ref.target_id
+            elif ref.type == AssignmentType.GROUP_DOMAIN:
+                assignment['group_id'] = ref.actor_id
+                assignment['domain_id'] = ref.target_id
+            else:
+                raise exception.Error(message=_(
+                    'Unexpected assignment type encountered, %s') %
+                    ref.type)
+            assignment['role_id'] = ref.role_id
+            if ref.inherited:
+                assignment['inherited_to_projects'] = 'projects'
+            return assignment
+
+        with sql.transaction() as session:
+            refs = session.query(RoleAssignment).all()
+            return [denormalize_role(ref) for ref in refs]
+
+    def delete_project_assignments(self, project_id):
+        with sql.transaction() as session:
+            q = session.query(RoleAssignment)
+            q = q.filter_by(target_id=project_id)
+            q.delete(False)
+
+    def delete_role_assignments(self, role_id):
+        with sql.transaction() as session:
+            q = session.query(RoleAssignment)
+            q = q.filter_by(role_id=role_id)
+            q.delete(False)
+
+    def delete_user(self, user_id):
+        with sql.transaction() as session:
+            q = session.query(RoleAssignment)
+            q = q.filter_by(actor_id=user_id)
+            q.delete(False)
+
+    def delete_group(self, group_id):
+        with sql.transaction() as session:
+            q = session.query(RoleAssignment)
+            q = q.filter_by(actor_id=group_id)
+            q.delete(False)
+
+
+class RoleAssignment(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'assignment'
+    attributes = ['type', 'actor_id', 'target_id', 'role_id', 'inherited']
+    # NOTE(henry-nash): Postgres requires a name to be defined for an Enum.
+    type = sql.Column(
+        sql.Enum(AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT,
+                 AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN,
+                 name='type'),
+        nullable=False)
+    actor_id = sql.Column(sql.String(64), nullable=False, index=True)
+    target_id = sql.Column(sql.String(64), nullable=False)
+    role_id = sql.Column(sql.String(64), nullable=False)
+    inherited = sql.Column(sql.Boolean, default=False, nullable=False)
+    __table_args__ = (sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id',
+                                               'role_id'), {})
+
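+    # A row, and hence to_dict()'s result, might look like (illustrative
+    # values):
+    #     {'type': 'UserProject', 'actor_id': 'u1', 'target_id': 'p1',
+    #      'role_id': 'r1', 'inherited': False}
+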
+    def to_dict(self):
+        """Override parent to_dict() method with a simpler implementation.
+
+        RoleAssignment doesn't have non-indexed 'extra' attributes, so the
+        parent implementation is not applicable.
+        """
+        return dict(six.iteritems(self))
diff --git a/keystone-moon/keystone/assignment/controllers.py b/keystone-moon/keystone/assignment/controllers.py
new file mode 100644 (file)
index 0000000..ff27fd3
--- /dev/null
@@ -0,0 +1,816 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Workflow Logic the Assignment service."""
+
+import copy
+import functools
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+from six.moves import urllib
+
+from keystone.assignment import schema
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import validation
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone.models import token_model
+from keystone import notifications
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('assignment_api', 'identity_api', 'token_provider_api')
+class TenantAssignment(controller.V2Controller):
+    """The V2 Project APIs that are processing assignments."""
+
+    @controller.v2_deprecated
+    def get_projects_for_token(self, context, **kw):
+        """Get valid tenants for token based on token used to authenticate.
+
+        Pulls the token from the context, validates it and gets the valid
+        tenants for the user in the token.
+
+        Doesn't care whether or not the token is scoped.
+
+        """
+        try:
+            token_data = self.token_provider_api.validate_token(
+                context['token_id'])
+            token_ref = token_model.KeystoneToken(token_id=context['token_id'],
+                                                  token_data=token_data)
+        except exception.NotFound as e:
+            LOG.warning(_LW('Authentication failed: %s'), e)
+            raise exception.Unauthorized(e)
+
+        tenant_refs = (
+            self.assignment_api.list_projects_for_user(token_ref.user_id))
+        tenant_refs = [self.filter_domain_id(ref) for ref in tenant_refs
+                       if ref['domain_id'] == CONF.identity.default_domain_id]
+        params = {
+            'limit': context['query_string'].get('limit'),
+            'marker': context['query_string'].get('marker'),
+        }
+        return self.format_project_list(tenant_refs, **params)
+
+    @controller.v2_deprecated
+    def get_project_users(self, context, tenant_id, **kw):
+        self.assert_admin(context)
+        user_refs = []
+        user_ids = self.assignment_api.list_user_ids_for_project(tenant_id)
+        for user_id in user_ids:
+            try:
+                user_ref = self.identity_api.get_user(user_id)
+            except exception.UserNotFound:
+                # Log that user is missing and continue on.
+                message = ("User %(user_id)s in project %(project_id)s "
+                           "doesn't exist.")
+                LOG.debug(message,
+                          {'user_id': user_id, 'project_id': tenant_id})
+            else:
+                user_refs.append(self.v3_to_v2_user(user_ref))
+        return {'users': user_refs}
+
+
+@dependency.requires('assignment_api', 'role_api')
+class Role(controller.V2Controller):
+    """The Role management APIs."""
+
+    @controller.v2_deprecated
+    def get_role(self, context, role_id):
+        self.assert_admin(context)
+        return {'role': self.role_api.get_role(role_id)}
+
+    @controller.v2_deprecated
+    def create_role(self, context, role):
+        role = self._normalize_dict(role)
+        self.assert_admin(context)
+
+        if 'name' not in role or not role['name']:
+            msg = _('Name field is required and cannot be empty')
+            raise exception.ValidationError(message=msg)
+
+        role_id = uuid.uuid4().hex
+        role['id'] = role_id
+        role_ref = self.role_api.create_role(role_id, role)
+        return {'role': role_ref}
+
+    @controller.v2_deprecated
+    def delete_role(self, context, role_id):
+        self.assert_admin(context)
+        self.role_api.delete_role(role_id)
+
+    @controller.v2_deprecated
+    def get_roles(self, context):
+        self.assert_admin(context)
+        return {'roles': self.role_api.list_roles()}
+
+
+@dependency.requires('assignment_api', 'resource_api', 'role_api')
+class RoleAssignmentV2(controller.V2Controller):
+    """The V2 Role APIs that are processing assignments."""
+
+    # COMPAT(essex-3)
+    @controller.v2_deprecated
+    def get_user_roles(self, context, user_id, tenant_id=None):
+        """Get the roles for a user and tenant pair.
+
+        Since we're trying to ignore the idea of user-only roles, we're
+        not implementing them in hopes that the idea will die off.
+
+        """
+        self.assert_admin(context)
+        roles = self.assignment_api.get_roles_for_user_and_project(
+            user_id, tenant_id)
+        return {'roles': [self.role_api.get_role(x)
+                          for x in roles]}
+
+    @controller.v2_deprecated
+    def add_role_to_user(self, context, user_id, role_id, tenant_id=None):
+        """Add a role to a user and tenant pair.
+
+        Since we're trying to ignore the idea of user-only roles, we're
+        not implementing them in hopes that the idea will die off.
+
+        """
+        self.assert_admin(context)
+        if tenant_id is None:
+            raise exception.NotImplemented(message='User roles not supported: '
+                                                   'tenant_id required')
+
+        self.assignment_api.add_role_to_user_and_project(
+            user_id, tenant_id, role_id)
+
+        role_ref = self.role_api.get_role(role_id)
+        return {'role': role_ref}
+
+    @controller.v2_deprecated
+    def remove_role_from_user(self, context, user_id, role_id, tenant_id=None):
+        """Remove a role from a user and tenant pair.
+
+        Since we're trying to ignore the idea of user-only roles, we're
+        not implementing them in hopes that the idea will die off.
+
+        """
+        self.assert_admin(context)
+        if tenant_id is None:
+            raise exception.NotImplemented(message='User roles not supported: '
+                                                   'tenant_id required')
+
+        # This still has the weird legacy semantics that adding a role to
+        # a user also adds them to a tenant, so we must follow up on that
+        self.assignment_api.remove_role_from_user_and_project(
+            user_id, tenant_id, role_id)
+
+    # COMPAT(diablo): CRUD extension
+    @controller.v2_deprecated
+    def get_role_refs(self, context, user_id):
+        """Ultimate hack to get around having to make role_refs first-class.
+
+        This will basically iterate over the various roles the user has in
+        all tenants the user is a member of and create fake role_refs where
+        the id encodes the user-tenant-role information so we can look
+        up the appropriate data when we need to delete them.
+
+        """
+        self.assert_admin(context)
+        tenants = self.assignment_api.list_projects_for_user(user_id)
+        o = []
+        for tenant in tenants:
+            # As a v2 call, we should limit the response to those projects in
+            # the default domain.
+            if tenant['domain_id'] != CONF.identity.default_domain_id:
+                continue
+            role_ids = self.assignment_api.get_roles_for_user_and_project(
+                user_id, tenant['id'])
+            for role_id in role_ids:
+                ref = {'roleId': role_id,
+                       'tenantId': tenant['id'],
+                       'userId': user_id}
+                ref['id'] = urllib.parse.urlencode(ref)
+                o.append(ref)
+        return {'roles': o}
+
+    # COMPAT(diablo): CRUD extension
+    @controller.v2_deprecated
+    def create_role_ref(self, context, user_id, role):
+        """This is actually used for adding a user to a tenant.
+
+        In the legacy data model adding a user to a tenant required setting
+        a role.
+
+        """
+        self.assert_admin(context)
+        # TODO(termie): for now we're ignoring the actual role
+        tenant_id = role.get('tenantId')
+        role_id = role.get('roleId')
+        self.assignment_api.add_role_to_user_and_project(
+            user_id, tenant_id, role_id)
+
+        role_ref = self.role_api.get_role(role_id)
+        return {'role': role_ref}
+
+    # COMPAT(diablo): CRUD extension
+    @controller.v2_deprecated
+    def delete_role_ref(self, context, user_id, role_ref_id):
+        """This is actually used for deleting a user from a tenant.
+
+        In the legacy data model removing a user from a tenant required
+        deleting a role.
+
+        To emulate this, we encode the tenant and role in the role_ref_id,
+        and if this happens to be the last role for the user-tenant pair,
+        we remove the user from the tenant.
+
+        """
+        self.assert_admin(context)
+        # TODO(termie): for now we're ignoring the actual role
+        role_ref_ref = urllib.parse.parse_qs(role_ref_id)
+        tenant_id = role_ref_ref.get('tenantId')[0]
+        role_id = role_ref_ref.get('roleId')[0]
+        self.assignment_api.remove_role_from_user_and_project(
+            user_id, tenant_id, role_id)
+
+
+@dependency.requires('assignment_api', 'resource_api')
+class ProjectAssignmentV3(controller.V3Controller):
+    """The V3 Project APIs that are processing assignments."""
+
+    collection_name = 'projects'
+    member_name = 'project'
+
+    def __init__(self):
+        super(ProjectAssignmentV3, self).__init__()
+        self.get_member_from_driver = self.resource_api.get_project
+
+    @controller.filterprotected('enabled', 'name')
+    def list_user_projects(self, context, filters, user_id):
+        hints = ProjectAssignmentV3.build_driver_hints(context, filters)
+        refs = self.assignment_api.list_projects_for_user(user_id,
+                                                          hints=hints)
+        return ProjectAssignmentV3.wrap_collection(context, refs, hints=hints)
+
+
+@dependency.requires('role_api')
+class RoleV3(controller.V3Controller):
+    """The V3 Role CRUD APIs."""
+
+    collection_name = 'roles'
+    member_name = 'role'
+
+    def __init__(self):
+        super(RoleV3, self).__init__()
+        self.get_member_from_driver = self.role_api.get_role
+
+    @controller.protected()
+    @validation.validated(schema.role_create, 'role')
+    def create_role(self, context, role):
+        ref = self._assign_unique_id(self._normalize_dict(role))
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.role_api.create_role(ref['id'], ref, initiator)
+        return RoleV3.wrap_member(context, ref)
+
+    @controller.filterprotected('name')
+    def list_roles(self, context, filters):
+        hints = RoleV3.build_driver_hints(context, filters)
+        refs = self.role_api.list_roles(
+            hints=hints)
+        return RoleV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.protected()
+    def get_role(self, context, role_id):
+        ref = self.role_api.get_role(role_id)
+        return RoleV3.wrap_member(context, ref)
+
+    @controller.protected()
+    @validation.validated(schema.role_update, 'role')
+    def update_role(self, context, role_id, role):
+        self._require_matching_id(role_id, role)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.role_api.update_role(role_id, role, initiator)
+        return RoleV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_role(self, context, role_id):
+        initiator = notifications._get_request_audit_info(context)
+        self.role_api.delete_role(role_id, initiator)
+
+
+@dependency.requires('assignment_api', 'identity_api', 'resource_api',
+                     'role_api')
+class GrantAssignmentV3(controller.V3Controller):
+    """The V3 Grant Assignment APIs."""
+
+    collection_name = 'roles'
+    member_name = 'role'
+
+    def __init__(self):
+        super(GrantAssignmentV3, self).__init__()
+        self.get_member_from_driver = self.role_api.get_role
+
+    def _require_domain_xor_project(self, domain_id, project_id):
+        if domain_id and project_id:
+            msg = _('Specify a domain or project, not both')
+            raise exception.ValidationError(msg)
+        if not domain_id and not project_id:
+            msg = _('Specify one of domain or project')
+            raise exception.ValidationError(msg)
+
+    def _require_user_xor_group(self, user_id, group_id):
+        if user_id and group_id:
+            msg = _('Specify a user or group, not both')
+            raise exception.ValidationError(msg)
+        if not user_id and not group_id:
+            msg = _('Specify one of user or group')
+            raise exception.ValidationError(msg)
+
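+    # An inherited grant path looks roughly like (a sketch):
+    #     /OS-INHERIT/domains/{domain_id}/users/{user_id}
+    #         /roles/{role_id}/inherited_to_projects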
+    def _check_if_inherited(self, context):
+        return (CONF.os_inherit.enabled and
+                context['path'].startswith('/OS-INHERIT') and
+                context['path'].endswith('/inherited_to_projects'))
+
+    def _check_grant_protection(self, context, protection, role_id=None,
+                                user_id=None, group_id=None,
+                                domain_id=None, project_id=None,
+                                allow_no_user=False):
+        """Check protection for role grant APIs.
+
+        The policy rule might want to inspect attributes of any of the entities
+        involved in the grant.  So we get these and pass them to the
+        check_protection() handler in the controller.
+
+        """
+        ref = {}
+        if role_id:
+            ref['role'] = self.role_api.get_role(role_id)
+        if user_id:
+            try:
+                ref['user'] = self.identity_api.get_user(user_id)
+            except exception.UserNotFound:
+                if not allow_no_user:
+                    raise
+        else:
+            ref['group'] = self.identity_api.get_group(group_id)
+
+        if domain_id:
+            ref['domain'] = self.resource_api.get_domain(domain_id)
+        else:
+            ref['project'] = self.resource_api.get_project(project_id)
+
+        self.check_protection(context, protection, ref)
+
+    @controller.protected(callback=_check_grant_protection)
+    def create_grant(self, context, role_id, user_id=None,
+                     group_id=None, domain_id=None, project_id=None):
+        """Grants a role to a user or group on either a domain or project."""
+        self._require_domain_xor_project(domain_id, project_id)
+        self._require_user_xor_group(user_id, group_id)
+
+        self.assignment_api.create_grant(
+            role_id, user_id, group_id, domain_id, project_id,
+            self._check_if_inherited(context), context)
+
+    @controller.protected(callback=_check_grant_protection)
+    def list_grants(self, context, user_id=None,
+                    group_id=None, domain_id=None, project_id=None):
+        """Lists roles granted to user/group on either a domain or project."""
+        self._require_domain_xor_project(domain_id, project_id)
+        self._require_user_xor_group(user_id, group_id)
+
+        refs = self.assignment_api.list_grants(
+            user_id, group_id, domain_id, project_id,
+            self._check_if_inherited(context))
+        return GrantAssignmentV3.wrap_collection(context, refs)
+
+    @controller.protected(callback=_check_grant_protection)
+    def check_grant(self, context, role_id, user_id=None,
+                    group_id=None, domain_id=None, project_id=None):
+        """Checks if a role has been granted on either a domain or project."""
+        self._require_domain_xor_project(domain_id, project_id)
+        self._require_user_xor_group(user_id, group_id)
+
+        self.assignment_api.get_grant(
+            role_id, user_id, group_id, domain_id, project_id,
+            self._check_if_inherited(context))
+
+    # NOTE(lbragstad): This will allow users to clean up role assignments
+    # from the backend in the event the user was removed prior to the role
+    # assignment being removed.
+    @controller.protected(callback=functools.partial(
+        _check_grant_protection, allow_no_user=True))
+    def revoke_grant(self, context, role_id, user_id=None,
+                     group_id=None, domain_id=None, project_id=None):
+        """Revokes a role from user/group on either a domain or project."""
+        self._require_domain_xor_project(domain_id, project_id)
+        self._require_user_xor_group(user_id, group_id)
+
+        self.assignment_api.delete_grant(
+            role_id, user_id, group_id, domain_id, project_id,
+            self._check_if_inherited(context), context)
+
+
+@dependency.requires('assignment_api', 'identity_api', 'resource_api')
+class RoleAssignmentV3(controller.V3Controller):
+    """The V3 Role Assignment APIs, really just list_role_assignment()."""
+
+    # TODO(henry-nash): The current implementation does not provide a full
+    # first class entity for role-assignment. There is no role_assignment_id
+    # and only the list_role_assignment call is supported. Further, since it
+    # is not a first class entity, the links for the individual entities
+    # reference the individual role grant APIs.
+
+    collection_name = 'role_assignments'
+    member_name = 'role_assignment'
+
+    @classmethod
+    def wrap_member(cls, context, ref):
+        # NOTE(henry-nash): Since we are not yet a true collection, we
+        # override the wrapper, as we have already included the links in
+        # the entities.
+        pass
+
+    def _format_entity(self, context, entity):
+        """Format an assignment entity for API response.
+
+        The driver layer returns entities as dicts containing the ids of the
+        actor (e.g. user or group), target (e.g. domain or project) and role.
+        If it is an inherited role, then this is also indicated. Examples:
+
+        {'user_id': user_id,
+         'project_id': project_id,
+         'role_id': role_id}
+
+        or, for an inherited role:
+
+        {'user_id': user_id,
+         'domain_id': domain_id,
+         'role_id': role_id,
+         'inherited_to_projects': true}
+
+        This function maps this into the format to be returned via the API,
+        e.g. for the second example above:
+
+        {
+            'user': {
+                'id': user_id
+            },
+            'scope': {
+                'domain': {
+                    'id': domain_id
+                },
+                'OS-INHERIT:inherited_to': 'projects'
+            },
+            'role': {
+                'id': role_id
+            },
+            'links': {
+                'assignment': '/domains/domain_id/users/user_id/roles/'
+                              'role_id/inherited_to_projects'
+            }
+        }
+
+        """
+
+        formatted_entity = {}
+        suffix = ""
+        if 'user_id' in entity:
+            formatted_entity['user'] = {'id': entity['user_id']}
+            actor_link = 'users/%s' % entity['user_id']
+        if 'group_id' in entity:
+            formatted_entity['group'] = {'id': entity['group_id']}
+            actor_link = 'groups/%s' % entity['group_id']
+        if 'role_id' in entity:
+            formatted_entity['role'] = {'id': entity['role_id']}
+        if 'project_id' in entity:
+            formatted_entity['scope'] = (
+                {'project': {'id': entity['project_id']}})
+            if 'inherited_to_projects' in entity:
+                formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
+                    'projects')
+                target_link = '/OS-INHERIT/projects/%s' % entity['project_id']
+                suffix = '/inherited_to_projects'
+            else:
+                target_link = '/projects/%s' % entity['project_id']
+        if 'domain_id' in entity:
+            formatted_entity['scope'] = (
+                {'domain': {'id': entity['domain_id']}})
+            if 'inherited_to_projects' in entity:
+                formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
+                    'projects')
+                target_link = '/OS-INHERIT/domains/%s' % entity['domain_id']
+                suffix = '/inherited_to_projects'
+            else:
+                target_link = '/domains/%s' % entity['domain_id']
+        formatted_entity.setdefault('links', {})
+
+        path = '%(target)s/%(actor)s/roles/%(role)s%(suffix)s' % {
+            'target': target_link,
+            'actor': actor_link,
+            'role': entity['role_id'],
+            'suffix': suffix}
+        formatted_entity['links']['assignment'] = self.base_url(context, path)
+
+        return formatted_entity
+
+    def _expand_indirect_assignments(self, context, refs):
+        """Processes entity list into all-direct assignments.
+
+        For any group role assignments in the list, create a role assignment
+        entity for each member of that group, and then remove the group
+        assignment entity itself from the list.
+
+        If the OS-INHERIT extension is enabled, then honor any inherited
+        roles on the domain by creating the equivalent on all projects
+        owned by the domain.
+
+        For any new entity created by virtue of group membership, add in an
+        additional link to that membership.
+
+        """
+        def _get_group_members(ref):
+            """Get a list of group members.
+
+            Get the list of group members.  If this fails with
+            GroupNotFound, then log this as a warning, but allow
+            overall processing to continue.
+
+            """
+            try:
+                members = self.identity_api.list_users_in_group(
+                    ref['group']['id'])
+            except exception.GroupNotFound:
+                members = []
+                # The group is missing, which should not happen since
+                # group deletion should remove any related assignments, so
+                # log a warning
+                target = 'Unknown'
+                # This should always be a domain or project, but since
+                # things have clearly gone astray for us to get here,
+                # let's be cautious.
+                if 'scope' in ref:
+                    if 'domain' in ref['scope']:
+                        dom_id = ref['scope']['domain'].get('id', 'Unknown')
+                        target = 'Domain: %s' % dom_id
+                    elif 'project' in ref['scope']:
+                        proj_id = ref['scope']['project'].get('id', 'Unknown')
+                        target = 'Project: %s' % proj_id
+                role_id = 'Unknown'
+                if 'role' in ref and 'id' in ref['role']:
+                    role_id = ref['role']['id']
+                LOG.warning(
+                    _LW('Group %(group)s not found for role-assignment - '
+                        '%(target)s with Role: %(role)s'), {
+                            'group': ref['group']['id'], 'target': target,
+                            'role': role_id})
+            return members
+
+        def _build_user_assignment_equivalent_of_group(
+                user, group_id, template):
+            """Create a user assignment equivalent to the group one.
+
+            The template has had the 'group' entity removed, so
+            substitute a 'user' one. The 'assignment' link stays as it is,
+            referring to the group assignment that led to this role.
+            A 'membership' link is added that refers to this particular
+            user's membership of this group.
+
+            """
+            user_entry = copy.deepcopy(template)
+            user_entry['user'] = {'id': user['id']}
+            user_entry['links']['membership'] = (
+                self.base_url(context, '/groups/%s/users/%s' %
+                              (group_id, user['id'])))
+            return user_entry
+
+        def _build_project_equivalent_of_user_target_role(
+                project_id, target_id, target_type, template):
+            """Create a user project assignment equivalent to the domain one.
+
+            The template has had the 'domain' entity removed, so
+            substitute a 'project' one, modifying the 'assignment' link
+            to match.
+
+            """
+            project_entry = copy.deepcopy(template)
+            project_entry['scope']['project'] = {'id': project_id}
+            project_entry['links']['assignment'] = (
+                self.base_url(
+                    context,
+                    '/OS-INHERIT/%s/%s/users/%s/roles/%s'
+                    '/inherited_to_projects' % (
+                        target_type, target_id, project_entry['user']['id'],
+                        project_entry['role']['id'])))
+            return project_entry
+
+        def _build_project_equivalent_of_group_target_role(
+                user_id, group_id, project_id,
+                target_id, target_type, template):
+            """Create a user project equivalent to the domain group one.
+
+            The template has had the 'domain' and 'group' entities removed, so
+            substitute a 'user-project' one, modifying the 'assignment' link
+            to match.
+
+            """
+            project_entry = copy.deepcopy(template)
+            project_entry['user'] = {'id': user_id}
+            project_entry['scope']['project'] = {'id': project_id}
+            project_entry['links']['assignment'] = (
+                self.base_url(context,
+                              '/OS-INHERIT/%s/%s/groups/%s/roles/%s'
+                              '/inherited_to_projects' % (
+                                  target_type, target_id, group_id,
+                                  project_entry['role']['id'])))
+            project_entry['links']['membership'] = (
+                self.base_url(context, '/groups/%s/users/%s' %
+                              (group_id, user_id)))
+            return project_entry
+
+        # Scan the list of entities for any assignments that need to be
+        # expanded.
+        #
+        # If the OS-INHERIT extension is enabled, the refs lists may
+        # contain roles to be inherited from domain to project, so expand
+        # these as well into project equivalents.
+        #
+        # For any regular group entries, expand these into user entries based
+        # on membership of that group.
+        #
+        # Due to the potentially large expansions, rather than modify the
+        # list we are enumerating, we build a new one as we go.
+        #
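+        # Illustrative example (hypothetical ids): an inherited group
+        # assignment such as
+        #   {'group': {'id': 'g1'},
+        #    'role': {'id': 'r1'},
+        #    'scope': {'domain': {'id': 'd1'},
+        #              'OS-INHERIT:inherited_to': 'projects'}}
+        # expands into one user-project entry per (member of g1, project
+        # owned by d1) pair.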
+
+        new_refs = []
+        for r in refs:
+            if 'OS-INHERIT:inherited_to' in r['scope']:
+                if 'domain' in r['scope']:
+                    # It's an inherited domain role - so get the list of
+                    # projects owned by this domain.
+                    project_ids = (
+                        [x['id'] for x in
+                            self.resource_api.list_projects_in_domain(
+                                r['scope']['domain']['id'])])
+                    base_entry = copy.deepcopy(r)
+                    target_type = 'domains'
+                    target_id = base_entry['scope']['domain']['id']
+                    base_entry['scope'].pop('domain')
+                else:
+                    # It's an inherited project role - so get the list of
+                    # projects in this project subtree.
+                    project_id = r['scope']['project']['id']
+                    project_ids = (
+                        [x['id'] for x in
+                            self.resource_api.list_projects_in_subtree(
+                                project_id)])
+                    base_entry = copy.deepcopy(r)
+                    target_type = 'projects'
+                    target_id = base_entry['scope']['project']['id']
+                    base_entry['scope'].pop('project')
+
+                # For each project, create an equivalent role assignment
+                for p in project_ids:
+                    # If it's a group assignment, then create equivalent user
+                    # roles based on membership of the group
+                    if 'group' in base_entry:
+                        members = _get_group_members(base_entry)
+                        sub_entry = copy.deepcopy(base_entry)
+                        group_id = sub_entry['group']['id']
+                        sub_entry.pop('group')
+                        for m in members:
+                            new_entry = (
+                                _build_project_equivalent_of_group_target_role(
+                                    m['id'], group_id, p,
+                                    target_id, target_type, sub_entry))
+                            new_refs.append(new_entry)
+                    else:
+                        new_entry = (
+                            _build_project_equivalent_of_user_target_role(
+                                p, target_id, target_type, base_entry))
+                        new_refs.append(new_entry)
+            elif 'group' in r:
+                # It's a non-inherited group role assignment, so get the list
+                # of members.
+                members = _get_group_members(r)
+
+                # Now replace that group role assignment entry with an
+                # equivalent user role assignment for each of the group members
+                base_entry = copy.deepcopy(r)
+                group_id = base_entry['group']['id']
+                base_entry.pop('group')
+                for m in members:
+                    user_entry = _build_user_assignment_equivalent_of_group(
+                        m, group_id, base_entry)
+                    new_refs.append(user_entry)
+            else:
+                new_refs.append(r)
+
+        return new_refs
+
+    def _filter_inherited(self, entry):
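+        # Drop (return False for) inherited assignment entries when the
+        # OS-INHERIT extension is disabled; everything else is kept.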
+        if ('inherited_to_projects' in entry and
+                not CONF.os_inherit.enabled):
+            return False
+        else:
+            return True
+
+    def _assert_effective_filters(self, inherited, group, domain):
+        """Assert that useless filter combinations are avoided.
+
+        In effective mode, the following filter combinations are useless, since
+        they would always return an empty list of role assignments:
+        - group id, since no group assignment is returned in effective mode;
+        - domain id and inherited, since no domain inherited assignment is
+        returned in effective mode.
+
+        """
+        if group:
+            msg = _('Combining effective and group filter will always '
+                    'result in an empty list.')
+            raise exception.ValidationError(msg)
+
+        if inherited and domain:
+            msg = _('Combining effective, domain and inherited filters will '
+                    'always result in an empty list.')
+            raise exception.ValidationError(msg)
+
+    def _assert_domain_nand_project(self, domain_id, project_id):
+        if domain_id and project_id:
+            msg = _('Specify a domain or project, not both')
+            raise exception.ValidationError(msg)
+
+    def _assert_user_nand_group(self, user_id, group_id):
+        if user_id and group_id:
+            msg = _('Specify a user or group, not both')
+            raise exception.ValidationError(msg)
+
+    @controller.filterprotected('group.id', 'role.id',
+                                'scope.domain.id', 'scope.project.id',
+                                'scope.OS-INHERIT:inherited_to', 'user.id')
+    def list_role_assignments(self, context, filters):
+
+        # TODO(henry-nash): This implementation uses the standard filtering
+        # in the V3.wrap_collection. Given the large number of individual
+        # assignments, this is pretty inefficient.  An alternative would be
+        # to pass the filters into the driver call, so that the list size is
+        # kept to a minimum.
+
+        params = context['query_string']
+        effective = 'effective' in params and (
+            self.query_filter_is_true(params['effective']))
+
+        if 'scope.OS-INHERIT:inherited_to' in params:
+            inherited = (
+                params['scope.OS-INHERIT:inherited_to'] == 'projects')
+        else:
+            # None means querying both inherited and direct assignments
+            inherited = None
+
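+        # The parsing above means that, for example (hypothetical request),
+        # a query string of
+        # 'effective=true&scope.OS-INHERIT:inherited_to=projects' results in
+        # effective=True and inherited=True, while omitting the OS-INHERIT
+        # filter leaves inherited=None.
+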
+        self._assert_domain_nand_project(params.get('scope.domain.id'),
+                                         params.get('scope.project.id'))
+        self._assert_user_nand_group(params.get('user.id'),
+                                     params.get('group.id'))
+
+        if effective:
+            self._assert_effective_filters(inherited=inherited,
+                                           group=params.get('group.id'),
+                                           domain=params.get(
+                                               'scope.domain.id'))
+
+        hints = self.build_driver_hints(context, filters)
+        refs = self.assignment_api.list_role_assignments()
+        formatted_refs = (
+            [self._format_entity(context, x) for x in refs
+             if self._filter_inherited(x)])
+
+        if effective:
+            formatted_refs = self._expand_indirect_assignments(context,
+                                                               formatted_refs)
+
+        return self.wrap_collection(context, formatted_refs, hints=hints)
+
+    @controller.protected()
+    def get_role_assignment(self, context):
+        raise exception.NotImplemented()
+
+    @controller.protected()
+    def update_role_assignment(self, context):
+        raise exception.NotImplemented()
+
+    @controller.protected()
+    def delete_role_assignment(self, context):
+        raise exception.NotImplemented()
diff --git a/keystone-moon/keystone/assignment/core.py b/keystone-moon/keystone/assignment/core.py
new file mode 100644 (file)
index 0000000..0f9c03e
--- /dev/null
@@ -0,0 +1,1019 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the assignment service."""
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import driver_hints
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _
+from keystone.i18n import _LI
+from keystone import notifications
+from keystone.openstack.common import versionutils
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+MEMOIZE = cache.get_memoization_decorator(section='role')
+
+
+def deprecated_to_role_api(f):
+    """Specialized deprecation wrapper for assignment to role api.
+
+    This wraps the standard deprecation wrapper and fills in the method
+    names automatically.
+
+    """
+    @six.wraps(f)
+    def wrapper(*args, **kwargs):
+        x = versionutils.deprecated(
+            what='assignment.' + f.__name__ + '()',
+            as_of=versionutils.deprecated.KILO,
+            in_favor_of='role.' + f.__name__ + '()')
+        return x(f)
+    return wrapper()
+
+
+def deprecated_to_resource_api(f):
+    """Specialized deprecation wrapper for assignment to resource api.
+
+    This wraps the standard deprecation wrapper and fills in the method
+    names automatically.
+
+    """
+    @six.wraps(f)
+    def wrapper(*args, **kwargs):
+        x = versionutils.deprecated(
+            what='assignment.' + f.__name__ + '()',
+            as_of=versionutils.deprecated.KILO,
+            in_favor_of='resource.' + f.__name__ + '()')
+        return x(f)
+    return wrapper()
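+
+# Illustrative effect of the two wrappers above (hypothetical call): invoking
+# assignment_api.get_role('r1') is expected to log a deprecation warning
+# pointing callers at role.get_role(), while still executing the wrapped
+# method.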
+
+
+@dependency.provider('assignment_api')
+@dependency.requires('credential_api', 'identity_api', 'resource_api',
+                     'revoke_api', 'role_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Assignment backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+    _PROJECT = 'project'
+    _ROLE_REMOVED_FROM_USER = 'role_removed_from_user'
+    _INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens'
+
+    def __init__(self):
+        assignment_driver = CONF.assignment.driver
+
+        # If there is no explicit assignment driver specified, we let the
+        # identity driver tell us what to use. This is for backward
+        # compatibility reasons from the time when identity, resource and
+        # assignment were all part of identity.
+        if assignment_driver is None:
+            identity_driver = dependency.get_provider('identity_api').driver
+            assignment_driver = identity_driver.default_assignment_driver()
+
+        super(Manager, self).__init__(assignment_driver)
+
+    def _get_group_ids_for_user_id(self, user_id):
+        # TODO(morganfainberg): Implement a way to get only group_ids
+        # instead of the more expensive to_dict() call for each record.
+        return [x['id'] for
+                x in self.identity_api.list_groups_for_user(user_id)]
+
+    def list_user_ids_for_project(self, tenant_id):
+        self.resource_api.get_project(tenant_id)
+        return self.driver.list_user_ids_for_project(tenant_id)
+
+    def _list_parent_ids_of_project(self, project_id):
+        if CONF.os_inherit.enabled:
+            return [x['id'] for x in (
+                self.resource_api.list_project_parents(project_id))]
+        else:
+            return []
+
+    def get_roles_for_user_and_project(self, user_id, tenant_id):
+        """Get the roles associated with a user within given project.
+
+        This includes roles directly assigned to the user on the
+        project, as well as those by virtue of group membership. If
+        the OS-INHERIT extension is enabled, then this will also
+        include roles inherited from the domain.
+
+        :returns: a list of role ids.
+        :raises: keystone.exception.UserNotFound,
+                 keystone.exception.ProjectNotFound
+
+        """
+        def _get_group_project_roles(user_id, project_ref):
+            group_ids = self._get_group_ids_for_user_id(user_id)
+            return self.driver.list_role_ids_for_groups_on_project(
+                group_ids,
+                project_ref['id'],
+                project_ref['domain_id'],
+                self._list_parent_ids_of_project(project_ref['id']))
+
+        def _get_user_project_roles(user_id, project_ref):
+            role_list = []
+            try:
+                metadata_ref = self._get_metadata(user_id=user_id,
+                                                  tenant_id=project_ref['id'])
+                role_list = self._roles_from_role_dicts(
+                    metadata_ref.get('roles', {}), False)
+            except exception.MetadataNotFound:
+                pass
+
+            if CONF.os_inherit.enabled:
+                # Now get any inherited roles for the owning domain
+                try:
+                    metadata_ref = self._get_metadata(
+                        user_id=user_id, domain_id=project_ref['domain_id'])
+                    role_list += self._roles_from_role_dicts(
+                        metadata_ref.get('roles', {}), True)
+                except (exception.MetadataNotFound, exception.NotImplemented):
+                    pass
+                # As well inherited roles from parent projects
+                for p in self.list_project_parents(project_ref['id']):
+                    p_roles = self.list_grants(
+                        user_id=user_id, project_id=p['id'],
+                        inherited_to_projects=True)
+                    role_list += [x['id'] for x in p_roles]
+
+            return role_list
+
+        project_ref = self.resource_api.get_project(tenant_id)
+        user_role_list = _get_user_project_roles(user_id, project_ref)
+        group_role_list = _get_group_project_roles(user_id, project_ref)
+        # Use set() to process the list to remove any duplicates
+        return list(set(user_role_list + group_role_list))
+
+    def get_roles_for_user_and_domain(self, user_id, domain_id):
+        """Get the roles associated with a user within given domain.
+
+        :returns: a list of role ids.
+        :raises: keystone.exception.UserNotFound,
+                 keystone.exception.DomainNotFound
+
+        """
+
+        def _get_group_domain_roles(user_id, domain_id):
+            role_list = []
+            group_ids = self._get_group_ids_for_user_id(user_id)
+            for group_id in group_ids:
+                try:
+                    metadata_ref = self._get_metadata(group_id=group_id,
+                                                      domain_id=domain_id)
+                    role_list += self._roles_from_role_dicts(
+                        metadata_ref.get('roles', {}), False)
+                except (exception.MetadataNotFound, exception.NotImplemented):
+                    # MetadataNotFound implies no group grant, so skip.
+                    # Ignore NotImplemented since not all backends support
+                    # domains.
+                    pass
+            return role_list
+
+        def _get_user_domain_roles(user_id, domain_id):
+            metadata_ref = {}
+            try:
+                metadata_ref = self._get_metadata(user_id=user_id,
+                                                  domain_id=domain_id)
+            except (exception.MetadataNotFound, exception.NotImplemented):
+                # MetadataNotFound implies no user grants.
+                # Ignore NotImplemented since not all backends support
+                # domains
+                pass
+            return self._roles_from_role_dicts(
+                metadata_ref.get('roles', {}), False)
+
+        self.get_domain(domain_id)
+        user_role_list = _get_user_domain_roles(user_id, domain_id)
+        group_role_list = _get_group_domain_roles(user_id, domain_id)
+        # Use set() to process the list to remove any duplicates
+        return list(set(user_role_list + group_role_list))
+
+    def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None):
+        """Get a list of roles for this group on domain and/or project."""
+
+        if project_id is not None:
+            project = self.resource_api.get_project(project_id)
+            role_ids = self.driver.list_role_ids_for_groups_on_project(
+                group_ids, project_id, project['domain_id'],
+                self._list_parent_ids_of_project(project_id))
+        elif domain_id is not None:
+            role_ids = self.driver.list_role_ids_for_groups_on_domain(
+                group_ids, domain_id)
+        else:
+            raise AttributeError(_("Must specify either domain or project"))
+
+        return self.role_api.list_roles_from_ids(role_ids)
+
+    def add_user_to_project(self, tenant_id, user_id):
+        """Add user to a tenant by creating a default role relationship.
+
+        :raises: keystone.exception.ProjectNotFound,
+                 keystone.exception.UserNotFound
+
+        """
+        self.resource_api.get_project(tenant_id)
+        try:
+            self.role_api.get_role(CONF.member_role_id)
+            self.driver.add_role_to_user_and_project(
+                user_id,
+                tenant_id,
+                CONF.member_role_id)
+        except exception.RoleNotFound:
+            LOG.info(_LI("Creating the default role %s "
+                         "because it does not exist."),
+                     CONF.member_role_id)
+            role = {'id': CONF.member_role_id,
+                    'name': CONF.member_role_name}
+            try:
+                self.role_api.create_role(CONF.member_role_id, role)
+            except exception.Conflict:
+                LOG.info(_LI("Creating the default role %s failed because it "
+                             "was already created"),
+                         CONF.member_role_id)
+            # now that default role exists, the add should succeed
+            self.driver.add_role_to_user_and_project(
+                user_id,
+                tenant_id,
+                CONF.member_role_id)
+
+    def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
+        self.resource_api.get_project(tenant_id)
+        self.role_api.get_role(role_id)
+        self.driver.add_role_to_user_and_project(user_id, tenant_id, role_id)
+
+    def remove_user_from_project(self, tenant_id, user_id):
+        """Remove user from a tenant
+
+        :raises: keystone.exception.ProjectNotFound,
+                 keystone.exception.UserNotFound
+
+        """
+        roles = self.get_roles_for_user_and_project(user_id, tenant_id)
+        if not roles:
+            raise exception.NotFound(tenant_id)
+        for role_id in roles:
+            try:
+                self.driver.remove_role_from_user_and_project(user_id,
+                                                              tenant_id,
+                                                              role_id)
+                self.revoke_api.revoke_by_grant(role_id, user_id=user_id,
+                                                project_id=tenant_id)
+
+            except exception.RoleNotFound:
+                LOG.debug("Removing role %s failed because it does not exist.",
+                          role_id)
+
+    # TODO(henry-nash): We might want to consider list limiting this at some
+    # point in the future.
+    def list_projects_for_user(self, user_id, hints=None):
+        # NOTE(henry-nash): In order to get a complete list of user projects,
+        # the driver will need to look at group assignments.  To avoid cross
+        # calling between the assignment and identity driver we get the group
+        # list here and pass it in. The rest of the detailed logic of listing
+        # projects for a user is pushed down into the driver to enable
+        # optimization with the various backend technologies (SQL, LDAP etc.).
+
+        group_ids = self._get_group_ids_for_user_id(user_id)
+        project_ids = self.driver.list_project_ids_for_user(
+            user_id, group_ids, hints or driver_hints.Hints())
+
+        if not CONF.os_inherit.enabled:
+            return self.resource_api.list_projects_from_ids(project_ids)
+
+        # Inherited roles are enabled, so check to see if this user has any
+        # inherited role (direct or group) on any parent project, in which
+        # case we must add in all the projects in that parent's subtree.
+        project_ids = set(project_ids)
+        project_ids_inherited = self.driver.list_project_ids_for_user(
+            user_id, group_ids, hints or driver_hints.Hints(), inherited=True)
+        for proj_id in project_ids_inherited:
+            project_ids.update(
+                (x['id'] for x in
+                 self.resource_api.list_projects_in_subtree(proj_id)))
+
+        # Now do the same for any domain inherited roles
+        domain_ids = self.driver.list_domain_ids_for_user(
+            user_id, group_ids, hints or driver_hints.Hints(),
+            inherited=True)
+        project_ids.update(
+            self.resource_api.list_project_ids_from_domain_ids(domain_ids))
+
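+        # e.g. (hypothetical ids) an inherited role on project 'p1' has added
+        # every project in p1's subtree, and an inherited role on domain 'd1'
+        # every project owned by 'd1'.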
+        return self.resource_api.list_projects_from_ids(list(project_ids))
+
+    # TODO(henry-nash): We might want to consider list limiting this at some
+    # point in the future.
+    def list_domains_for_user(self, user_id, hints=None):
+        # NOTE(henry-nash): In order to get a complete list of user domains,
+        # the driver will need to look at group assignments.  To avoid cross
+        # calling between the assignment and identity driver we get the group
+        # list here and pass it in. The rest of the detailed logic of listing
+        # domains for a user is pushed down into the driver to enable
+        # optimization with the various backend technologies (SQL, LDAP etc.).
+        group_ids = self._get_group_ids_for_user_id(user_id)
+        domain_ids = self.driver.list_domain_ids_for_user(
+            user_id, group_ids, hints or driver_hints.Hints())
+        return self.resource_api.list_domains_from_ids(domain_ids)
+
+    def list_domains_for_groups(self, group_ids):
+        domain_ids = self.driver.list_domain_ids_for_groups(group_ids)
+        return self.resource_api.list_domains_from_ids(domain_ids)
+
+    def list_projects_for_groups(self, group_ids):
+        project_ids = (
+            self.driver.list_project_ids_for_groups(group_ids,
+                                                    driver_hints.Hints()))
+        if not CONF.os_inherit.enabled:
+            return self.resource_api.list_projects_from_ids(project_ids)
+
+        # Inherited roles are enabled, so check to see if these groups have
+        # any inherited role on any domain, in which case we must add in all
+        # the projects in that domain.
+
+        domain_ids = self.driver.list_domain_ids_for_groups(
+            group_ids, inherited=True)
+
+        project_ids_from_domains = (
+            self.resource_api.list_project_ids_from_domain_ids(domain_ids))
+
+        return self.resource_api.list_projects_from_ids(
+            list(set(project_ids + project_ids_from_domains)))
+
+    def list_role_assignments_for_role(self, role_id=None):
+        # NOTE(henry-nash): Currently the efficiency of the key driver
+        # implementation (SQL) of list_role_assignments is severely hampered by
+        # the existence of the multiple grant tables - hence there is little
+        # advantage in pushing the logic of this method down into the driver.
+        # Once the single assignment table is implemented, then this situation
+        # will be different, and this method should have its own driver
+        # implementation.
+        return [r for r in self.driver.list_role_assignments()
+                if r['role_id'] == role_id]
+
+    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
+        self.driver.remove_role_from_user_and_project(user_id, tenant_id,
+                                                      role_id)
+        self.identity_api.emit_invalidate_user_token_persistence(user_id)
+        self.revoke_api.revoke_by_grant(role_id, user_id=user_id,
+                                        project_id=tenant_id)
+
+    @notifications.internal(notifications.INVALIDATE_USER_TOKEN_PERSISTENCE)
+    def _emit_invalidate_user_token_persistence(self, user_id):
+        self.identity_api.emit_invalidate_user_token_persistence(user_id)
+
+    @notifications.role_assignment('created')
+    def create_grant(self, role_id, user_id=None, group_id=None,
+                     domain_id=None, project_id=None,
+                     inherited_to_projects=False, context=None):
+        self.role_api.get_role(role_id)
+        if domain_id:
+            self.resource_api.get_domain(domain_id)
+        if project_id:
+            self.resource_api.get_project(project_id)
+        self.driver.create_grant(role_id, user_id, group_id, domain_id,
+                                 project_id, inherited_to_projects)
+
+    def get_grant(self, role_id, user_id=None, group_id=None,
+                  domain_id=None, project_id=None,
+                  inherited_to_projects=False):
+        role_ref = self.role_api.get_role(role_id)
+        if domain_id:
+            self.resource_api.get_domain(domain_id)
+        if project_id:
+            self.resource_api.get_project(project_id)
+        self.driver.check_grant_role_id(
+            role_id, user_id, group_id, domain_id, project_id,
+            inherited_to_projects)
+        return role_ref
+
+    def list_grants(self, user_id=None, group_id=None,
+                    domain_id=None, project_id=None,
+                    inherited_to_projects=False):
+        if domain_id:
+            self.resource_api.get_domain(domain_id)
+        if project_id:
+            self.resource_api.get_project(project_id)
+        grant_ids = self.driver.list_grant_role_ids(
+            user_id, group_id, domain_id, project_id, inherited_to_projects)
+        return self.role_api.list_roles_from_ids(grant_ids)
+
+    @notifications.role_assignment('deleted')
+    def delete_grant(self, role_id, user_id=None, group_id=None,
+                     domain_id=None, project_id=None,
+                     inherited_to_projects=False, context=None):
+        if group_id is None:
+            self.revoke_api.revoke_by_grant(user_id=user_id,
+                                            role_id=role_id,
+                                            domain_id=domain_id,
+                                            project_id=project_id)
+        else:
+            try:
+                # NOTE(morganfainberg): The user ids are the important part
+                # for invalidating tokens below, so extract them here.
+                for user in self.identity_api.list_users_in_group(group_id):
+                    if user['id'] != user_id:
+                        self._emit_invalidate_user_token_persistence(
+                            user['id'])
+                        self.revoke_api.revoke_by_grant(
+                            user_id=user['id'], role_id=role_id,
+                            domain_id=domain_id, project_id=project_id)
+            except exception.GroupNotFound:
+                LOG.debug('Group %s not found, no tokens to invalidate.',
+                          group_id)
+
+        # TODO(henry-nash): While having the call to get_role here mimics the
+        # previous behavior (when it was buried inside the driver delete call),
+        # this seems an odd place to have this check, given what we have
+        # already done so far in this method. See Bug #1406776.
+        self.role_api.get_role(role_id)
+
+        if domain_id:
+            self.resource_api.get_domain(domain_id)
+        if project_id:
+            self.resource_api.get_project(project_id)
+        self.driver.delete_grant(role_id, user_id, group_id, domain_id,
+                                 project_id, inherited_to_projects)
+        if user_id is not None:
+            self._emit_invalidate_user_token_persistence(user_id)
+
+    def delete_tokens_for_role_assignments(self, role_id):
+        assignments = self.list_role_assignments_for_role(role_id=role_id)
+
+        # Iterate over the assignments for this role and build the list of
+        # user or user+project IDs for the tokens we need to delete
+        user_ids = set()
+        user_and_project_ids = list()
+        for assignment in assignments:
+            # If we have a project assignment, then record both the user and
+            # project IDs so we can target the right token to delete. If it is
+            # a domain assignment, we might as well kill all the tokens for
+            # the user, since in the vast majority of cases all the tokens
+            # for a user will be within one domain anyway, so not worth
+            # trying to delete tokens for each project in the domain.
+            if 'user_id' in assignment:
+                if 'project_id' in assignment:
+                    user_and_project_ids.append(
+                        (assignment['user_id'], assignment['project_id']))
+                elif 'domain_id' in assignment:
+                    self._emit_invalidate_user_token_persistence(
+                        assignment['user_id'])
+            elif 'group_id' in assignment:
+                # Add in any users for this group, being tolerant of any
+                # cross-driver database integrity errors.
+                try:
+                    users = self.identity_api.list_users_in_group(
+                        assignment['group_id'])
+                except exception.GroupNotFound:
+                    # Ignore it, but log a debug message
+                    if 'project_id' in assignment:
+                        target = _('Project (%s)') % assignment['project_id']
+                    elif 'domain_id' in assignment:
+                        target = _('Domain (%s)') % assignment['domain_id']
+                    else:
+                        target = _('Unknown Target')
+                    msg = ('Group (%(group)s), referenced in assignment '
+                           'for %(target)s, not found - ignoring.')
+                    LOG.debug(msg, {'group': assignment['group_id'],
+                                    'target': target})
+                    continue
+
+                if 'project_id' in assignment:
+                    for user in users:
+                        user_and_project_ids.append(
+                            (user['id'], assignment['project_id']))
+                elif 'domain_id' in assignment:
+                    for user in users:
+                        self._emit_invalidate_user_token_persistence(
+                            user['id'])
+
+        # Now process the built up lists.  Before issuing calls to delete any
+        # tokens, let's try and minimize the number of calls by pruning out
+        # any user+project deletions where a general token deletion for that
+        # same user is also planned.
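+        # For example, if a blanket invalidation for user 'u1' (hypothetical
+        # id) were recorded in user_ids, any pending ('u1', project) pair
+        # would be pruned below rather than actioned separately.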
+        user_and_project_ids_to_action = []
+        for user_and_project_id in user_and_project_ids:
+            if user_and_project_id[0] not in user_ids:
+                user_and_project_ids_to_action.append(user_and_project_id)
+
+        for user_id, project_id in user_and_project_ids_to_action:
+            self._emit_invalidate_user_project_tokens_notification(
+                {'user_id': user_id,
+                 'project_id': project_id})
+
+    @notifications.internal(
+        notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE)
+    def _emit_invalidate_user_project_tokens_notification(self, payload):
+        # This notification's payload is a dict of user_id and
+        # project_id so the token provider can invalidate the tokens
+        # from persistence if persistence is enabled.
+        pass
+
+    @deprecated_to_role_api
+    def create_role(self, role_id, role):
+        return self.role_api.create_role(role_id, role)
+
+    @deprecated_to_role_api
+    def get_role(self, role_id):
+        return self.role_api.get_role(role_id)
+
+    @deprecated_to_role_api
+    def update_role(self, role_id, role):
+        return self.role_api.update_role(role_id, role)
+
+    @deprecated_to_role_api
+    def delete_role(self, role_id):
+        return self.role_api.delete_role(role_id)
+
+    @deprecated_to_role_api
+    def list_roles(self, hints=None):
+        return self.role_api.list_roles(hints=hints)
+
+    @deprecated_to_resource_api
+    def create_project(self, project_id, project):
+        return self.resource_api.create_project(project_id, project)
+
+    @deprecated_to_resource_api
+    def get_project_by_name(self, tenant_name, domain_id):
+        return self.resource_api.get_project_by_name(tenant_name, domain_id)
+
+    @deprecated_to_resource_api
+    def get_project(self, project_id):
+        return self.resource_api.get_project(project_id)
+
+    @deprecated_to_resource_api
+    def update_project(self, project_id, project):
+        return self.resource_api.update_project(project_id, project)
+
+    @deprecated_to_resource_api
+    def delete_project(self, project_id):
+        return self.resource_api.delete_project(project_id)
+
+    @deprecated_to_resource_api
+    def list_projects(self, hints=None):
+        return self.resource_api.list_projects(hints=hints)
+
+    @deprecated_to_resource_api
+    def list_projects_in_domain(self, domain_id):
+        return self.resource_api.list_projects_in_domain(domain_id)
+
+    @deprecated_to_resource_api
+    def create_domain(self, domain_id, domain):
+        return self.resource_api.create_domain(domain_id, domain)
+
+    @deprecated_to_resource_api
+    def get_domain_by_name(self, domain_name):
+        return self.resource_api.get_domain_by_name(domain_name)
+
+    @deprecated_to_resource_api
+    def get_domain(self, domain_id):
+        return self.resource_api.get_domain(domain_id)
+
+    @deprecated_to_resource_api
+    def update_domain(self, domain_id, domain):
+        return self.resource_api.update_domain(domain_id, domain)
+
+    @deprecated_to_resource_api
+    def delete_domain(self, domain_id):
+        return self.resource_api.delete_domain(domain_id)
+
+    @deprecated_to_resource_api
+    def list_domains(self, hints=None):
+        return self.resource_api.list_domains(hints=hints)
+
+    @deprecated_to_resource_api
+    def assert_domain_enabled(self, domain_id, domain=None):
+        return self.resource_api.assert_domain_enabled(domain_id, domain)
+
+    @deprecated_to_resource_api
+    def assert_project_enabled(self, project_id, project=None):
+        return self.resource_api.assert_project_enabled(project_id, project)
+
+    @deprecated_to_resource_api
+    def is_leaf_project(self, project_id):
+        return self.resource_api.is_leaf_project(project_id)
+
+    @deprecated_to_resource_api
+    def list_project_parents(self, project_id, user_id=None):
+        return self.resource_api.list_project_parents(project_id, user_id)
+
+    @deprecated_to_resource_api
+    def list_projects_in_subtree(self, project_id, user_id=None):
+        return self.resource_api.list_projects_in_subtree(project_id, user_id)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+
+    def _role_to_dict(self, role_id, inherited):
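+        # e.g. _role_to_dict('r1', True) -> {'id': 'r1',
+        #                                    'inherited_to': 'projects'}
+        # ('r1' is a hypothetical id).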
+        role_dict = {'id': role_id}
+        if inherited:
+            role_dict['inherited_to'] = 'projects'
+        return role_dict
+
+    def _roles_from_role_dicts(self, dict_list, inherited):
+        role_list = []
+        for d in dict_list:
+            if ((not d.get('inherited_to') and not inherited) or
+               (d.get('inherited_to') == 'projects' and inherited)):
+                role_list.append(d['id'])
+        return role_list
+
+    def _add_role_to_role_dicts(self, role_id, inherited, dict_list,
+                                allow_existing=True):
+        # There is a difference in error semantics when trying to
+        # assign a role that already exists between the coded v2 and v3
+        # API calls.  v2 will error if the assignment already exists,
+        # while v3 is silent. Setting the 'allow_existing' parameter
+        # appropriately lets this call be used for both.
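+        # Illustrative example (hypothetical ids): adding role 'r1' as
+        # inherited to [{'id': 'r2'}] returns [{'id': 'r2'},
+        # {'id': 'r1', 'inherited_to': 'projects'}], in unspecified order
+        # since a set is used below.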
+        role_set = set([frozenset(r.items()) for r in dict_list])
+        key = frozenset(self._role_to_dict(role_id, inherited).items())
+        if not allow_existing and key in role_set:
+            raise KeyError
+        role_set.add(key)
+        return [dict(r) for r in role_set]
+
+    def _remove_role_from_role_dicts(self, role_id, inherited, dict_list):
+        role_set = set([frozenset(r.items()) for r in dict_list])
+        role_set.remove(frozenset(self._role_to_dict(role_id,
+                                                     inherited).items()))
+        return [dict(r) for r in role_set]
+
+    def _get_list_limit(self):
+        return CONF.assignment.list_limit or CONF.list_limit
+
+    @abc.abstractmethod
+    def list_user_ids_for_project(self, tenant_id):
+        """Lists all user IDs with a role assignment in the specified project.
+
+        :returns: a list of user_ids or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
+        """Add a role to a user within given tenant.
+
+        :raises: keystone.exception.Conflict
+
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
+        """Remove a role from a user within given tenant.
+
+        :raises: keystone.exception.RoleNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # assignment/grant crud
+
+    @abc.abstractmethod
+    def create_grant(self, role_id, user_id=None, group_id=None,
+                     domain_id=None, project_id=None,
+                     inherited_to_projects=False):
+        """Creates a new assignment/grant.
+
+        If the assignment is to a domain, then optionally it may be
+        specified as inherited to owned projects (this requires
+        the OS-INHERIT extension to be enabled).
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_grant_role_ids(self, user_id=None, group_id=None,
+                            domain_id=None, project_id=None,
+                            inherited_to_projects=False):
+        """Lists role ids for assignments/grants."""
+
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def check_grant_role_id(self, role_id, user_id=None, group_id=None,
+                            domain_id=None, project_id=None,
+                            inherited_to_projects=False):
+        """Checks an assignment/grant role id.
+
+        :raises: keystone.exception.RoleAssignmentNotFound
+        :returns: None or raises an exception if grant not found
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_grant(self, role_id, user_id=None, group_id=None,
+                     domain_id=None, project_id=None,
+                     inherited_to_projects=False):
+        """Deletes assignments/grants.
+
+        :raises: keystone.exception.RoleAssignmentNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_role_assignments(self):
+        """Lists all role assignments in the system."""
+
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_project_ids_for_user(self, user_id, group_ids, hints,
+                                  inherited=False):
+        """List all project ids associated with a given user.
+
+        :param user_id: the user in question
+        :param group_ids: the groups this user is a member of.  This list is
+                          built in the Manager, so that the driver itself
+                          does not have to call across to identity.
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+        :param inherited: whether assignments marked as inherited should
+                          be included.
+
+        :returns: a list of project ids or an empty list.
+
+        This method should not try to expand any inherited assignments,
+        just report the projects that have the role for this user. The manager
+        method is responsible for expanding out inherited assignments.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_project_ids_for_groups(self, group_ids, hints,
+                                    inherited=False):
+        """List project ids accessible to specified groups.
+
+        :param group_ids: List of group ids.
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+        :param inherited: whether assignments marked as inherited should
+                          be included.
+        :returns: List of project ids accessible to specified groups.
+
+        This method should not try to expand any inherited assignments,
+        just report the projects that have the role for this group. The manager
+        method is responsible for expanding out inherited assignments.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_domain_ids_for_user(self, user_id, group_ids, hints,
+                                 inherited=False):
+        """List all domain ids associated with a given user.
+
+        :param user_id: the user in question
+        :param group_ids: the groups this user is a member of.  This list is
+                          built in the Manager, so that the driver itself
+                          does not have to call across to identity.
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+        :param inherited: whether to return domain_ids that have inherited
+                          assignments or not.
+
+        :returns: a list of domain ids or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_domain_ids_for_groups(self, group_ids, inherited=False):
+        """List domain ids accessible to specified groups.
+
+        :param group_ids: List of group ids.
+        :param inherited: whether to return domain_ids that have inherited
+                          assignments or not.
+        :returns: List of domain ids accessible to specified groups.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_role_ids_for_groups_on_project(
+            self, group_ids, project_id, project_domain_id, project_parents):
+        """List the group role ids for a specific project.
+
+        Supports the ``OS-INHERIT`` role inheritance from the project's domain
+        if supported by the assignment driver.
+
+        :param group_ids: list of group ids
+        :type group_ids: list
+        :param project_id: project identifier
+        :type project_id: str
+        :param project_domain_id: project's domain identifier
+        :type project_domain_id: str
+        :param project_parents: list of parent ids of this project
+        :type project_parents: list
+        :returns: list of role ids for the project
+        :rtype: list
+        """
+        raise exception.NotImplemented()
+
+    @abc.abstractmethod
+    def list_role_ids_for_groups_on_domain(self, group_ids, domain_id):
+        """List the group role ids for a specific domain.
+
+        :param group_ids: list of group ids
+        :type group_ids: list
+        :param domain_id: domain identifier
+        :type domain_id: str
+        :returns: list of role ids for the domain
+        :rtype: list
+        """
+        raise exception.NotImplemented()
+
+    @abc.abstractmethod
+    def delete_project_assignments(self, project_id):
+        """Deletes all assignments for a project.
+
+        :raises: keystone.exception.ProjectNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_role_assignments(self, role_id):
+        """Deletes all assignments for a role."""
+
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # TODO(henry-nash): Rename the following two methods to match the more
+    # meaningfully named ones above.
+
+    # TODO(ayoung): determine what else these two functions raise
+    @abc.abstractmethod
+    def delete_user(self, user_id):
+        """Deletes all assignments for a user.
+
+        :raises: keystone.exception.RoleNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_group(self, group_id):
+        """Deletes all assignments for a group.
+
+        :raises: keystone.exception.RoleNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+
+@dependency.provider('role_api')
+@dependency.requires('assignment_api')
+class RoleManager(manager.Manager):
+    """Default pivot point for the Role backend."""
+
+    _ROLE = 'role'
+
+    def __init__(self):
+        # If there is a specific driver specified for role, then use it.
+        # Otherwise retrieve the driver type from the assignment driver.
+        role_driver = CONF.role.driver
+
+        if role_driver is None:
+            assignment_driver = (
+                dependency.get_provider('assignment_api').driver)
+            role_driver = assignment_driver.default_role_driver()
+
+        super(RoleManager, self).__init__(role_driver)
+
+    @MEMOIZE
+    def get_role(self, role_id):
+        return self.driver.get_role(role_id)
+
+    def create_role(self, role_id, role, initiator=None):
+        ret = self.driver.create_role(role_id, role)
+        notifications.Audit.created(self._ROLE, role_id, initiator)
+        if MEMOIZE.should_cache(ret):
+            self.get_role.set(ret, self, role_id)
+        return ret
+
+    @manager.response_truncated
+    def list_roles(self, hints=None):
+        return self.driver.list_roles(hints or driver_hints.Hints())
+
+    def update_role(self, role_id, role, initiator=None):
+        ret = self.driver.update_role(role_id, role)
+        notifications.Audit.updated(self._ROLE, role_id, initiator)
+        self.get_role.invalidate(self, role_id)
+        return ret
+
+    def delete_role(self, role_id, initiator=None):
+        try:
+            self.assignment_api.delete_tokens_for_role_assignments(role_id)
+        except exception.NotImplemented:
+            # FIXME(morganfainberg): Not all backends (ldap) implement
+            # `list_role_assignments_for_role` which would have previously
+            # caused a NotImplemented error to be raised when called through
+            # the controller. Now any error or proper action will always come from
+            # the `delete_role` method logic. Work needs to be done to make
+            # the behavior between drivers consistent (capable of revoking
+            # tokens for the same circumstances).  This is related to the bug
+            # https://bugs.launchpad.net/keystone/+bug/1221805
+            pass
+        self.assignment_api.delete_role_assignments(role_id)
+        self.driver.delete_role(role_id)
+        notifications.Audit.deleted(self._ROLE, role_id, initiator)
+        self.get_role.invalidate(self, role_id)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class RoleDriver(object):
+
+    def _get_list_limit(self):
+        return CONF.role.list_limit or CONF.list_limit
+
+    @abc.abstractmethod
+    def create_role(self, role_id, role):
+        """Creates a new role.
+
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_roles(self, hints):
+        """List roles in the system.
+
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+
+        :returns: a list of role_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_roles_from_ids(self, role_ids):
+        """List roles for the provided list of ids.
+
+        :param role_ids: list of ids
+
+        :returns: a list of role_refs.
+
+        This method is used internally by the assignment manager to bulk read
+        a set of roles given their ids.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_role(self, role_id):
+        """Get a role by ID.
+
+        :returns: role_ref
+        :raises: keystone.exception.RoleNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_role(self, role_id, role):
+        """Updates an existing role.
+
+        :raises: keystone.exception.RoleNotFound,
+                 keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_role(self, role_id):
+        """Deletes an existing role.
+
+        :raises: keystone.exception.RoleNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/assignment/role_backends/__init__.py b/keystone-moon/keystone/assignment/role_backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/assignment/role_backends/ldap.py b/keystone-moon/keystone/assignment/role_backends/ldap.py
new file mode 100644 (file)
index 0000000..d5a06a4
--- /dev/null
@@ -0,0 +1,125 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone import assignment
+from keystone.common import ldap as common_ldap
+from keystone.common import models
+from keystone import exception
+from keystone.i18n import _
+from keystone.identity.backends import ldap as ldap_identity
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class Role(assignment.RoleDriver):
+
+    def __init__(self):
+        super(Role, self).__init__()
+        self.LDAP_URL = CONF.ldap.url
+        self.LDAP_USER = CONF.ldap.user
+        self.LDAP_PASSWORD = CONF.ldap.password
+        self.suffix = CONF.ldap.suffix
+
+        # This is the only deep dependency from resource back
+        # to identity.  The assumption is that if you are using
+        # LDAP for resource, you are using it for identity as well.
+        self.user = ldap_identity.UserApi(CONF)
+        self.role = RoleApi(CONF, self.user)
+
+    def get_role(self, role_id):
+        return self.role.get(role_id)
+
+    def list_roles(self, hints):
+        return self.role.get_all()
+
+    def list_roles_from_ids(self, ids):
+        return [self.get_role(id) for id in ids]
+
+    def create_role(self, role_id, role):
+        self.role.check_allow_create()
+        try:
+            self.get_role(role_id)
+        except exception.NotFound:
+            pass
+        else:
+            msg = _('Duplicate ID, %s.') % role_id
+            raise exception.Conflict(type='role', details=msg)
+
+        try:
+            self.role.get_by_name(role['name'])
+        except exception.NotFound:
+            pass
+        else:
+            msg = _('Duplicate name, %s.') % role['name']
+            raise exception.Conflict(type='role', details=msg)
+
+        return self.role.create(role)
+
+    def delete_role(self, role_id):
+        self.role.check_allow_delete()
+        return self.role.delete(role_id)
+
+    def update_role(self, role_id, role):
+        self.role.check_allow_update()
+        self.get_role(role_id)
+        return self.role.update(role_id, role)
+
+
+# NOTE(henry-nash): A mixin class to enable the sharing of the LDAP structure
+# between here and the assignment LDAP.
+class RoleLdapStructureMixin(object):
+    DEFAULT_OU = 'ou=Roles'
+    DEFAULT_STRUCTURAL_CLASSES = []
+    DEFAULT_OBJECTCLASS = 'organizationalRole'
+    DEFAULT_MEMBER_ATTRIBUTE = 'roleOccupant'
+    NotFound = exception.RoleNotFound
+    options_name = 'role'
+    attribute_options_names = {'name': 'name'}
+    immutable_attrs = ['id']
+    model = models.Role
+
+
+# TODO(termie): turn this into a data object and move logic to driver
+class RoleApi(RoleLdapStructureMixin, common_ldap.BaseLdap):
+
+    def __init__(self, conf, user_api):
+        super(RoleApi, self).__init__(conf)
+        self._user_api = user_api
+
+    def get(self, role_id, role_filter=None):
+        model = super(RoleApi, self).get(role_id, role_filter)
+        return model
+
+    def create(self, values):
+        return super(RoleApi, self).create(values)
+
+    def update(self, role_id, role):
+        new_name = role.get('name')
+        if new_name is not None:
+            try:
+                old_role = self.get_by_name(new_name)
+                if old_role['id'] != role_id:
+                    raise exception.Conflict(
+                        _('Cannot duplicate name %s') % old_role['name'])
+            except exception.NotFound:
+                pass
+        return super(RoleApi, self).update(role_id, role)
+
+    def delete(self, role_id):
+        super(RoleApi, self).delete(role_id)
diff --git a/keystone-moon/keystone/assignment/role_backends/sql.py b/keystone-moon/keystone/assignment/role_backends/sql.py
new file mode 100644 (file)
index 0000000..f19d182
--- /dev/null
@@ -0,0 +1,80 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone import assignment
+from keystone.common import sql
+from keystone import exception
+
+
+class Role(assignment.RoleDriver):
+
+    @sql.handle_conflicts(conflict_type='role')
+    def create_role(self, role_id, role):
+        with sql.transaction() as session:
+            ref = RoleTable.from_dict(role)
+            session.add(ref)
+            return ref.to_dict()
+
+    @sql.truncated
+    def list_roles(self, hints):
+        with sql.transaction() as session:
+            query = session.query(RoleTable)
+            refs = sql.filter_limit_query(RoleTable, query, hints)
+            return [ref.to_dict() for ref in refs]
+
+    def list_roles_from_ids(self, ids):
+        if not ids:
+            return []
+        else:
+            with sql.transaction() as session:
+                query = session.query(RoleTable)
+                query = query.filter(RoleTable.id.in_(ids))
+                role_refs = query.all()
+                return [role_ref.to_dict() for role_ref in role_refs]
+
+    def _get_role(self, session, role_id):
+        ref = session.query(RoleTable).get(role_id)
+        if ref is None:
+            raise exception.RoleNotFound(role_id=role_id)
+        return ref
+
+    def get_role(self, role_id):
+        with sql.transaction() as session:
+            return self._get_role(session, role_id).to_dict()
+
+    @sql.handle_conflicts(conflict_type='role')
+    def update_role(self, role_id, role):
+        with sql.transaction() as session:
+            ref = self._get_role(session, role_id)
+            old_dict = ref.to_dict()
+            for k in role:
+                old_dict[k] = role[k]
+            new_role = RoleTable.from_dict(old_dict)
+            for attr in RoleTable.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_role, attr))
+            ref.extra = new_role.extra
+            return ref.to_dict()
+
+    def delete_role(self, role_id):
+        with sql.transaction() as session:
+            ref = self._get_role(session, role_id)
+            session.delete(ref)
+
+
+class RoleTable(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'role'
+    attributes = ['id', 'name']
+    id = sql.Column(sql.String(64), primary_key=True)
+    name = sql.Column(sql.String(255), unique=True, nullable=False)
+    extra = sql.Column(sql.JsonBlob())
+    __table_args__ = (sql.UniqueConstraint('name'), {})
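+
+
+# For reference, an illustrative sketch (an assumption; the real DDL is
+# generated by SQLAlchemy and the migration scripts): the model above maps
+# roughly to
+#
+#   CREATE TABLE role (
+#       id VARCHAR(64) NOT NULL PRIMARY KEY,
+#       name VARCHAR(255) NOT NULL UNIQUE,
+#       extra TEXT
+#   );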
diff --git a/keystone-moon/keystone/assignment/routers.py b/keystone-moon/keystone/assignment/routers.py
new file mode 100644 (file)
index 0000000..49549a0
--- /dev/null
@@ -0,0 +1,246 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""WSGI Routers for the Assignment service."""
+
+import functools
+
+from oslo_config import cfg
+
+from keystone.assignment import controllers
+from keystone.common import json_home
+from keystone.common import router
+from keystone.common import wsgi
+
+
+CONF = cfg.CONF
+
+build_os_inherit_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-INHERIT', extension_version='1.0')
+
+
+class Public(wsgi.ComposableRouter):
+    def add_routes(self, mapper):
+        tenant_controller = controllers.TenantAssignment()
+        mapper.connect('/tenants',
+                       controller=tenant_controller,
+                       action='get_projects_for_token',
+                       conditions=dict(method=['GET']))
+
+
+class Admin(wsgi.ComposableRouter):
+    def add_routes(self, mapper):
+        # Role Operations
+        roles_controller = controllers.RoleAssignmentV2()
+        mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles',
+                       controller=roles_controller,
+                       action='get_user_roles',
+                       conditions=dict(method=['GET']))
+        mapper.connect('/users/{user_id}/roles',
+                       controller=roles_controller,
+                       action='get_user_roles',
+                       conditions=dict(method=['GET']))
+
+
+class Routers(wsgi.RoutersBase):
+
+    def append_v3_routers(self, mapper, routers):
+
+        project_controller = controllers.ProjectAssignmentV3()
+        self._add_resource(
+            mapper, project_controller,
+            path='/users/{user_id}/projects',
+            get_action='list_user_projects',
+            rel=json_home.build_v3_resource_relation('user_projects'),
+            path_vars={
+                'user_id': json_home.Parameters.USER_ID,
+            })
+
+        routers.append(
+            router.Router(controllers.RoleV3(), 'roles', 'role',
+                          resource_descriptions=self.v3_resources))
+
+        grant_controller = controllers.GrantAssignmentV3()
+        self._add_resource(
+            mapper, grant_controller,
+            path='/projects/{project_id}/users/{user_id}/roles/{role_id}',
+            get_head_action='check_grant',
+            put_action='create_grant',
+            delete_action='revoke_grant',
+            rel=json_home.build_v3_resource_relation('project_user_role'),
+            path_vars={
+                'project_id': json_home.Parameters.PROJECT_ID,
+                'role_id': json_home.Parameters.ROLE_ID,
+                'user_id': json_home.Parameters.USER_ID,
+            })
+        self._add_resource(
+            mapper, grant_controller,
+            path='/projects/{project_id}/groups/{group_id}/roles/{role_id}',
+            get_head_action='check_grant',
+            put_action='create_grant',
+            delete_action='revoke_grant',
+            rel=json_home.build_v3_resource_relation('project_group_role'),
+            path_vars={
+                'group_id': json_home.Parameters.GROUP_ID,
+                'project_id': json_home.Parameters.PROJECT_ID,
+                'role_id': json_home.Parameters.ROLE_ID,
+            })
+        self._add_resource(
+            mapper, grant_controller,
+            path='/projects/{project_id}/users/{user_id}/roles',
+            get_action='list_grants',
+            rel=json_home.build_v3_resource_relation('project_user_roles'),
+            path_vars={
+                'project_id': json_home.Parameters.PROJECT_ID,
+                'user_id': json_home.Parameters.USER_ID,
+            })
+        self._add_resource(
+            mapper, grant_controller,
+            path='/projects/{project_id}/groups/{group_id}/roles',
+            get_action='list_grants',
+            rel=json_home.build_v3_resource_relation('project_group_roles'),
+            path_vars={
+                'group_id': json_home.Parameters.GROUP_ID,
+                'project_id': json_home.Parameters.PROJECT_ID,
+            })
+        self._add_resource(
+            mapper, grant_controller,
+            path='/domains/{domain_id}/users/{user_id}/roles/{role_id}',
+            get_head_action='check_grant',
+            put_action='create_grant',
+            delete_action='revoke_grant',
+            rel=json_home.build_v3_resource_relation('domain_user_role'),
+            path_vars={
+                'domain_id': json_home.Parameters.DOMAIN_ID,
+                'role_id': json_home.Parameters.ROLE_ID,
+                'user_id': json_home.Parameters.USER_ID,
+            })
+        self._add_resource(
+            mapper, grant_controller,
+            path='/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
+            get_head_action='check_grant',
+            put_action='create_grant',
+            delete_action='revoke_grant',
+            rel=json_home.build_v3_resource_relation('domain_group_role'),
+            path_vars={
+                'domain_id': json_home.Parameters.DOMAIN_ID,
+                'group_id': json_home.Parameters.GROUP_ID,
+                'role_id': json_home.Parameters.ROLE_ID,
+            })
+        self._add_resource(
+            mapper, grant_controller,
+            path='/domains/{domain_id}/users/{user_id}/roles',
+            get_action='list_grants',
+            rel=json_home.build_v3_resource_relation('domain_user_roles'),
+            path_vars={
+                'domain_id': json_home.Parameters.DOMAIN_ID,
+                'user_id': json_home.Parameters.USER_ID,
+            })
+        self._add_resource(
+            mapper, grant_controller,
+            path='/domains/{domain_id}/groups/{group_id}/roles',
+            get_action='list_grants',
+            rel=json_home.build_v3_resource_relation('domain_group_roles'),
+            path_vars={
+                'domain_id': json_home.Parameters.DOMAIN_ID,
+                'group_id': json_home.Parameters.GROUP_ID,
+            })
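+
+        # Illustrative request shapes served by the grant routes above
+        # (paths relative to the v3 API root; IDs are placeholders):
+        #   PUT    /projects/{project_id}/users/{user_id}/roles/{role_id}
+        #   HEAD   /projects/{project_id}/users/{user_id}/roles/{role_id}
+        #   DELETE /projects/{project_id}/users/{user_id}/roles/{role_id}
+        #   GET    /projects/{project_id}/users/{user_id}/roles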
+
+        routers.append(
+            router.Router(controllers.RoleAssignmentV3(),
+                          'role_assignments', 'role_assignment',
+                          resource_descriptions=self.v3_resources,
+                          is_entity_implemented=False))
+
+        if CONF.os_inherit.enabled:
+            self._add_resource(
+                mapper, grant_controller,
+                path='/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/'
+                '{role_id}/inherited_to_projects',
+                get_head_action='check_grant',
+                put_action='create_grant',
+                delete_action='revoke_grant',
+                rel=build_os_inherit_relation(
+                    resource_name='domain_user_role_inherited_to_projects'),
+                path_vars={
+                    'domain_id': json_home.Parameters.DOMAIN_ID,
+                    'role_id': json_home.Parameters.ROLE_ID,
+                    'user_id': json_home.Parameters.USER_ID,
+                })
+            self._add_resource(
+                mapper, grant_controller,
+                path='/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/'
+                '{role_id}/inherited_to_projects',
+                get_head_action='check_grant',
+                put_action='create_grant',
+                delete_action='revoke_grant',
+                rel=build_os_inherit_relation(
+                    resource_name='domain_group_role_inherited_to_projects'),
+                path_vars={
+                    'domain_id': json_home.Parameters.DOMAIN_ID,
+                    'group_id': json_home.Parameters.GROUP_ID,
+                    'role_id': json_home.Parameters.ROLE_ID,
+                })
+            self._add_resource(
+                mapper, grant_controller,
+                path='/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/'
+                'inherited_to_projects',
+                get_action='list_grants',
+                rel=build_os_inherit_relation(
+                    resource_name='domain_group_roles_inherited_to_projects'),
+                path_vars={
+                    'domain_id': json_home.Parameters.DOMAIN_ID,
+                    'group_id': json_home.Parameters.GROUP_ID,
+                })
+            self._add_resource(
+                mapper, grant_controller,
+                path='/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/'
+                'inherited_to_projects',
+                get_action='list_grants',
+                rel=build_os_inherit_relation(
+                    resource_name='domain_user_roles_inherited_to_projects'),
+                path_vars={
+                    'domain_id': json_home.Parameters.DOMAIN_ID,
+                    'user_id': json_home.Parameters.USER_ID,
+                })
+            self._add_resource(
+                mapper, grant_controller,
+                path='/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/'
+                '{role_id}/inherited_to_projects',
+                get_head_action='check_grant',
+                put_action='create_grant',
+                delete_action='revoke_grant',
+                rel=build_os_inherit_relation(
+                    resource_name='project_user_role_inherited_to_projects'),
+                path_vars={
+                    'project_id': json_home.Parameters.PROJECT_ID,
+                    'user_id': json_home.Parameters.USER_ID,
+                    'role_id': json_home.Parameters.ROLE_ID,
+                })
+            self._add_resource(
+                mapper, grant_controller,
+                path='/OS-INHERIT/projects/{project_id}/groups/{group_id}/'
+                'roles/{role_id}/inherited_to_projects',
+                get_head_action='check_grant',
+                put_action='create_grant',
+                delete_action='revoke_grant',
+                rel=build_os_inherit_relation(
+                    resource_name='project_group_role_inherited_to_projects'),
+                path_vars={
+                    'project_id': json_home.Parameters.PROJECT_ID,
+                    'group_id': json_home.Parameters.GROUP_ID,
+                    'role_id': json_home.Parameters.ROLE_ID,
+                })
diff --git a/keystone-moon/keystone/assignment/schema.py b/keystone-moon/keystone/assignment/schema.py
new file mode 100644 (file)
index 0000000..f4d1b08
--- /dev/null
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.validation import parameter_types
+
+
+_role_properties = {
+    'name': parameter_types.name
+}
+
+role_create = {
+    'type': 'object',
+    'properties': _role_properties,
+    'required': ['name'],
+    'additionalProperties': True
+}
+
+role_update = {
+    'type': 'object',
+    'properties': _role_properties,
+    'minProperties': 1,
+    'additionalProperties': True
+}
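+
+
+# Illustrative payloads (an assumption for documentation, not shipped code):
+# role_create accepts {'name': 'admin'} and, because additionalProperties is
+# True, bodies with extra keys such as {'name': 'admin', 'description': '...'}
+# as well; role_update only requires at least one property, e.g.
+# {'name': 'member'}.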
diff --git a/keystone-moon/keystone/auth/__init__.py b/keystone-moon/keystone/auth/__init__.py
new file mode 100644 (file)
index 0000000..b1e4203
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.auth import controllers  # noqa
+from keystone.auth.core import *  # noqa
+from keystone.auth import routers  # noqa
diff --git a/keystone-moon/keystone/auth/controllers.py b/keystone-moon/keystone/auth/controllers.py
new file mode 100644 (file)
index 0000000..065f1f0
--- /dev/null
@@ -0,0 +1,647 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import importutils
+from oslo_utils import timeutils
+import six
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import wsgi
+from keystone import config
+from keystone.contrib import federation
+from keystone import exception
+from keystone.i18n import _, _LI, _LW
+from keystone.resource import controllers as resource_controllers
+
+
+LOG = log.getLogger(__name__)
+
+CONF = cfg.CONF
+
+# registry of authentication methods
+AUTH_METHODS = {}
+AUTH_PLUGINS_LOADED = False
+
+
+def load_auth_methods():
+    global AUTH_PLUGINS_LOADED
+
+    if AUTH_PLUGINS_LOADED:
+        # Only try and load methods a single time.
+        return
+    # config.setup_authentication should be idempotent, call it to ensure we
+    # have setup all the appropriate configuration options we may need.
+    config.setup_authentication()
+    for plugin in CONF.auth.methods:
+        if '.' in plugin:
+            # NOTE(morganfainberg): if '.' is in the plugin name, it should be
+            # imported rather than used as a plugin identifier.
+            plugin_class = plugin
+            driver = importutils.import_object(plugin)
+            if not hasattr(driver, 'method'):
+                raise ValueError(_('Cannot load an auth-plugin by class-name '
+                                   'without a "method" attribute defined: %s')
+                                 % plugin_class)
+
+            LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
+            plugin_name = driver.method
+        else:
+            plugin_name = plugin
+            plugin_class = CONF.auth.get(plugin)
+            driver = importutils.import_object(plugin_class)
+        if plugin_name in AUTH_METHODS:
+            raise ValueError(_('Auth plugin %(plugin)s is requesting '
+                               'previously registered method %(method)s') %
+                             {'plugin': plugin_class, 'method': driver.method})
+        AUTH_METHODS[plugin_name] = driver
+    AUTH_PLUGINS_LOADED = True
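+
+
+# Illustrative configuration consumed by load_auth_methods() (the values are
+# assumptions, not defaults):
+#
+#   [auth]
+#   methods = external,password,token
+#   password = keystone.auth.plugins.password.Password
+#
+# A dotted name listed directly in 'methods' is imported as a class and its
+# 'method' attribute is used as the plugin name (deprecated behaviour).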
+
+
+def get_auth_method(method_name):
+    global AUTH_METHODS
+    if method_name not in AUTH_METHODS:
+        raise exception.AuthMethodNotSupported()
+    return AUTH_METHODS[method_name]
+
+
+class AuthContext(dict):
+    """Retrofitting auth_context to reconcile identity attributes.
+
+    The identity attributes must not have conflicting values among the
+    auth plug-ins. The only exception is `expires_at`, which is set to its
+    earliest value.
+
+    """
+
+    # identity attributes need to be reconciled among the auth plugins
+    IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id',
+                                     'access_token_id', 'domain_id',
+                                     'expires_at'])
+
+    def __setitem__(self, key, val):
+        if key in self.IDENTITY_ATTRIBUTES and key in self:
+            existing_val = self[key]
+            if key == 'expires_at':
+                # special treatment for 'expires_at', we are going to take
+                # the earliest expiration instead.
+                if existing_val != val:
+                    LOG.info(_LI('"expires_at" has conflicting values '
+                                 '%(existing)s and %(new)s.  Will use the '
+                                 'earliest value.'),
+                             {'existing': existing_val, 'new': val})
+                if existing_val is None or val is None:
+                    val = existing_val or val
+                else:
+                    val = min(existing_val, val)
+            elif existing_val != val:
+                msg = _('Unable to reconcile identity attribute %(attribute)s '
+                        'as it has conflicting values %(new)s and %(old)s') % (
+                            {'attribute': key,
+                             'new': val,
+                             'old': existing_val})
+                raise exception.Unauthorized(msg)
+        return super(AuthContext, self).__setitem__(key, val)
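+
+
+# Illustrative behaviour of AuthContext (a sketch, not shipped code):
+#
+#   ctx = AuthContext()
+#   ctx['expires_at'] = later_time
+#   ctx['expires_at'] = earlier_time  # kept: the earliest value wins
+#   ctx['user_id'] = 'abc'
+#   ctx['user_id'] = 'def'            # raises exception.Unauthorized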
+
+
+# TODO(blk-u): this class doesn't use identity_api directly, but makes it
+# available for consumers. Consumers should probably not be getting
+# identity_api from this since it's available in global registry, then
+# identity_api should be removed from this list.
+@dependency.requires('identity_api', 'resource_api', 'trust_api')
+class AuthInfo(object):
+    """Encapsulation of "auth" request."""
+
+    @staticmethod
+    def create(context, auth=None):
+        auth_info = AuthInfo(context, auth=auth)
+        auth_info._validate_and_normalize_auth_data()
+        return auth_info
+
+    def __init__(self, context, auth=None):
+        self.context = context
+        self.auth = auth
+        self._scope_data = (None, None, None, None)
+        # self._scope_data is (domain_id, project_id, trust_ref, unscoped)
+        # project scope: (None, project_id, None, None)
+        # domain scope: (domain_id, None, None, None)
+        # trust scope: (None, None, trust_ref, None)
+        # unscoped: (None, None, None, 'unscoped')
+
+    def _assert_project_is_enabled(self, project_ref):
+        # ensure the project is enabled
+        try:
+            self.resource_api.assert_project_enabled(
+                project_id=project_ref['id'],
+                project=project_ref)
+        except AssertionError as e:
+            LOG.warning(six.text_type(e))
+            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
+                        sys.exc_info()[2])
+
+    def _assert_domain_is_enabled(self, domain_ref):
+        try:
+            self.resource_api.assert_domain_enabled(
+                domain_id=domain_ref['id'],
+                domain=domain_ref)
+        except AssertionError as e:
+            LOG.warning(six.text_type(e))
+            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
+                        sys.exc_info()[2])
+
+    def _lookup_domain(self, domain_info):
+        domain_id = domain_info.get('id')
+        domain_name = domain_info.get('name')
+        domain_ref = None
+        if not domain_id and not domain_name:
+            raise exception.ValidationError(attribute='id or name',
+                                            target='domain')
+        try:
+            if domain_name:
+                domain_ref = self.resource_api.get_domain_by_name(
+                    domain_name)
+            else:
+                domain_ref = self.resource_api.get_domain(domain_id)
+        except exception.DomainNotFound as e:
+            LOG.exception(six.text_type(e))
+            raise exception.Unauthorized(e)
+        self._assert_domain_is_enabled(domain_ref)
+        return domain_ref
+
+    def _lookup_project(self, project_info):
+        project_id = project_info.get('id')
+        project_name = project_info.get('name')
+        project_ref = None
+        if not project_id and not project_name:
+            raise exception.ValidationError(attribute='id or name',
+                                            target='project')
+        try:
+            if project_name:
+                if 'domain' not in project_info:
+                    raise exception.ValidationError(attribute='domain',
+                                                    target='project')
+                domain_ref = self._lookup_domain(project_info['domain'])
+                project_ref = self.resource_api.get_project_by_name(
+                    project_name, domain_ref['id'])
+            else:
+                project_ref = self.resource_api.get_project(project_id)
+                # NOTE(morganfainberg): The _lookup_domain method will raise
+                # exception.Unauthorized if the domain isn't found or is
+                # disabled.
+                self._lookup_domain({'id': project_ref['domain_id']})
+        except exception.ProjectNotFound as e:
+            raise exception.Unauthorized(e)
+        self._assert_project_is_enabled(project_ref)
+        return project_ref
+
+    def _lookup_trust(self, trust_info):
+        trust_id = trust_info.get('id')
+        if not trust_id:
+            raise exception.ValidationError(attribute='trust_id',
+                                            target='trust')
+        trust = self.trust_api.get_trust(trust_id)
+        if not trust:
+            raise exception.TrustNotFound(trust_id=trust_id)
+        return trust
+
+    def _validate_and_normalize_scope_data(self):
+        """Validate and normalize scope data."""
+        if 'scope' not in self.auth:
+            return
+        if sum(['project' in self.auth['scope'],
+                'domain' in self.auth['scope'],
+                'unscoped' in self.auth['scope'],
+                'OS-TRUST:trust' in self.auth['scope']]) != 1:
+            raise exception.ValidationError(
+                attribute='project, domain, OS-TRUST:trust or unscoped',
+                target='scope')
+        if 'unscoped' in self.auth['scope']:
+            self._scope_data = (None, None, None, 'unscoped')
+            return
+        if 'project' in self.auth['scope']:
+            project_ref = self._lookup_project(self.auth['scope']['project'])
+            self._scope_data = (None, project_ref['id'], None, None)
+        elif 'domain' in self.auth['scope']:
+            domain_ref = self._lookup_domain(self.auth['scope']['domain'])
+            self._scope_data = (domain_ref['id'], None, None, None)
+        elif 'OS-TRUST:trust' in self.auth['scope']:
+            if not CONF.trust.enabled:
+                raise exception.Forbidden('Trusts are disabled.')
+            trust_ref = self._lookup_trust(
+                self.auth['scope']['OS-TRUST:trust'])
+            # TODO(ayoung): when trusts support domains, fill in domain data
+            if trust_ref.get('project_id') is not None:
+                project_ref = self._lookup_project(
+                    {'id': trust_ref['project_id']})
+                self._scope_data = (None, project_ref['id'], trust_ref, None)
+            else:
+                self._scope_data = (None, None, trust_ref, None)
+
+    def _validate_auth_methods(self):
+        if 'identity' not in self.auth:
+            raise exception.ValidationError(attribute='identity',
+                                            target='auth')
+
+        # make sure auth methods are provided
+        if 'methods' not in self.auth['identity']:
+            raise exception.ValidationError(attribute='methods',
+                                            target='identity')
+
+        # make sure all the method data/payload are provided
+        for method_name in self.get_method_names():
+            if method_name not in self.auth['identity']:
+                raise exception.ValidationError(attribute=method_name,
+                                                target='identity')
+
+        # make sure auth method is supported
+        for method_name in self.get_method_names():
+            if method_name not in AUTH_METHODS:
+                raise exception.AuthMethodNotSupported()
+
+    def _validate_and_normalize_auth_data(self):
+        """Make sure "auth" is valid."""
+        # make sure "auth" exist
+        if not self.auth:
+            raise exception.ValidationError(attribute='auth',
+                                            target='request body')
+
+        self._validate_auth_methods()
+        self._validate_and_normalize_scope_data()
+
+    def get_method_names(self):
+        """Returns the identity method names.
+
+        :returns: list of auth method names
+
+        """
+        # Sanitize the methods received in the request body: filter out
+        # duplicates while preserving order, e.g.
+        # ['password', 'password', 'token'] -> ['password', 'token'].
+        method_names = []
+        for method in self.auth['identity']['methods']:
+            if method not in method_names:
+                method_names.append(method)
+        return method_names
+
+    def get_method_data(self, method):
+        """Get the auth method payload.
+
+        :returns: auth method payload
+
+        """
+        if method not in self.auth['identity']['methods']:
+            raise exception.ValidationError(attribute=method,
+                                            target='identity')
+        return self.auth['identity'][method]
+
+    def get_scope(self):
+        """Get scope information.
+
+        Verify and return the scoping information.
+
+        :returns: (domain_id, project_id, trust_ref, unscoped).
+                   If scope to a project, (None, project_id, None, None)
+                   will be returned.
+                   If scoped to a domain, (domain_id, None, None, None)
+                   will be returned.
+                   If scoped to a trust, (None, project_id, trust_ref, None)
+                   will be returned, where the project_id comes from the
+                   trust definition.
+                   If unscoped, (None, None, None, 'unscoped') will be
+                   returned.
+
+        """
+        return self._scope_data
+
+    def set_scope(self, domain_id=None, project_id=None, trust=None,
+                  unscoped=None):
+        """Set scope information."""
+        if domain_id and project_id:
+            msg = _('Scoping to both domain and project is not allowed')
+            raise ValueError(msg)
+        if domain_id and trust:
+            msg = _('Scoping to both domain and trust is not allowed')
+            raise ValueError(msg)
+        if project_id and trust:
+            msg = _('Scoping to both project and trust is not allowed')
+            raise ValueError(msg)
+        self._scope_data = (domain_id, project_id, trust, unscoped)
+
+
+@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
+                     'resource_api', 'token_provider_api', 'trust_api')
+class Auth(controller.V3Controller):
+
+    # Note(atiwari): the V3 auth controller calls the protection()
+    # wrappers, so we need to set up its member_name and collection_name
+    # attributes. Without them, the generic default string 'entity' would
+    # be used to represent the target, and a policy defined against
+    # 'entity' would not reflect the exact entity in context. We therefore
+    # define collection_name = 'tokens' and member_name = 'token' to
+    # facilitate policy decisions.
+    collection_name = 'tokens'
+    member_name = 'token'
+
+    def __init__(self, *args, **kw):
+        super(Auth, self).__init__(*args, **kw)
+        config.setup_authentication()
+
+    def authenticate_for_token(self, context, auth=None):
+        """Authenticate user and issue a token."""
+        include_catalog = 'nocatalog' not in context['query_string']
+
+        try:
+            auth_info = AuthInfo.create(context, auth=auth)
+            auth_context = AuthContext(extras={},
+                                       method_names=[],
+                                       bind={})
+            self.authenticate(context, auth_info, auth_context)
+            if auth_context.get('access_token_id'):
+                auth_info.set_scope(None, auth_context['project_id'], None)
+            self._check_and_set_default_scoping(auth_info, auth_context)
+            (domain_id, project_id, trust, unscoped) = auth_info.get_scope()
+
+            method_names = auth_info.get_method_names()
+            method_names += auth_context.get('method_names', [])
+            # make sure the list is unique
+            method_names = list(set(method_names))
+            expires_at = auth_context.get('expires_at')
+            # NOTE(morganfainberg): define this here so it is clear what the
+            # argument is during the issue_v3_token provider call.
+            metadata_ref = None
+
+            token_audit_id = auth_context.get('audit_id')
+
+            (token_id, token_data) = self.token_provider_api.issue_v3_token(
+                auth_context['user_id'], method_names, expires_at, project_id,
+                domain_id, auth_context, trust, metadata_ref, include_catalog,
+                parent_audit_id=token_audit_id)
+
+            # NOTE(wanghong): We consume a trust use only when we are using
+            # trusts and have successfully issued a token.
+            if trust:
+                self.trust_api.consume_use(trust['id'])
+
+            return render_token_data_response(token_id, token_data,
+                                              created=True)
+        except exception.TrustNotFound as e:
+            raise exception.Unauthorized(e)
+
+    def _check_and_set_default_scoping(self, auth_info, auth_context):
+        (domain_id, project_id, trust, unscoped) = auth_info.get_scope()
+        if trust:
+            project_id = trust['project_id']
+        if domain_id or project_id or trust:
+            # scope is specified
+            return
+
+        # Skip scoping when unscoped federated token is being issued
+        if federation.IDENTITY_PROVIDER in auth_context:
+            return
+
+        # Do not scope if request is for explicitly unscoped token
+        if unscoped is not None:
+            return
+
+        # fill in default_project_id if it is available
+        try:
+            user_ref = self.identity_api.get_user(auth_context['user_id'])
+        except exception.UserNotFound as e:
+            LOG.exception(six.text_type(e))
+            raise exception.Unauthorized(e)
+
+        default_project_id = user_ref.get('default_project_id')
+        if not default_project_id:
+            # The user has no default project, so issue an unscoped token.
+            return
+
+        # make sure user's default project is legit before scoping to it
+        try:
+            default_project_ref = self.resource_api.get_project(
+                default_project_id)
+            default_project_domain_ref = self.resource_api.get_domain(
+                default_project_ref['domain_id'])
+            if (default_project_ref.get('enabled', True) and
+                    default_project_domain_ref.get('enabled', True)):
+                if self.assignment_api.get_roles_for_user_and_project(
+                        user_ref['id'], default_project_id):
+                    auth_info.set_scope(project_id=default_project_id)
+                else:
+                    msg = _LW("User %(user_id)s doesn't have access to"
+                              " default project %(project_id)s. The token"
+                              " will be unscoped rather than scoped to the"
+                              " project.")
+                    LOG.warning(msg,
+                                {'user_id': user_ref['id'],
+                                 'project_id': default_project_id})
+            else:
+                msg = _LW("User %(user_id)s's default project %(project_id)s"
+                          " is disabled. The token will be unscoped rather"
+                          " than scoped to the project.")
+                LOG.warning(msg,
+                            {'user_id': user_ref['id'],
+                             'project_id': default_project_id})
+        except (exception.ProjectNotFound, exception.DomainNotFound):
+            # default project or default project domain doesn't exist,
+            # will issue unscoped token instead
+            msg = _LW("User %(user_id)s's default project %(project_id)s not"
+                      " found. The token will be unscoped rather than"
+                      " scoped to the project.")
+            LOG.warning(msg, {'user_id': user_ref['id'],
+                              'project_id': default_project_id})
+
+    def authenticate(self, context, auth_info, auth_context):
+        """Authenticate user."""
+
+        # The 'external' method allows any 'REMOTE_USER' based authentication
+        # In some cases the server can set REMOTE_USER as '' instead of
+        # dropping it, so this must be filtered out
+        if context['environment'].get('REMOTE_USER'):
+            try:
+                external = get_auth_method('external')
+                external.authenticate(context, auth_info, auth_context)
+            except exception.AuthMethodNotSupported:
+                # This will happen when there is no 'external' plugin
+                # registered and the container is performing the
+                # authentication. The 'kerberos' and 'saml' methods will be
+                # used this way. In those cases, it is correct not to
+                # register an 'external' plugin; if both an 'external' and a
+                # 'kerberos' plugin were present, the identity check would
+                # run twice.
+                LOG.debug("No 'external' plugin is registered.")
+            except exception.Unauthorized:
+                # If external fails then continue and attempt to determine
+                # user identity using remaining auth methods
+                LOG.debug("Authorization failed for 'external' auth method.")
+
+        # need to aggregate the results in case two or more methods
+        # are specified
+        auth_response = {'methods': []}
+        for method_name in auth_info.get_method_names():
+            method = get_auth_method(method_name)
+            resp = method.authenticate(context,
+                                       auth_info.get_method_data(method_name),
+                                       auth_context)
+            if resp:
+                auth_response['methods'].append(method_name)
+                auth_response[method_name] = resp
+
+        if auth_response['methods']:
+            # authentication continuation required
+            raise exception.AdditionalAuthRequired(auth_response)
+
+        if 'user_id' not in auth_context:
+            msg = _('User not found')
+            raise exception.Unauthorized(msg)
+
+    @controller.protected()
+    def check_token(self, context):
+        token_id = context.get('subject_token_id')
+        token_data = self.token_provider_api.validate_v3_token(
+            token_id)
+        # NOTE(morganfainberg): The code in
+        # ``keystone.common.wsgi.render_response`` will remove the content
+        # body.
+        return render_token_data_response(token_id, token_data)
+
+    @controller.protected()
+    def revoke_token(self, context):
+        token_id = context.get('subject_token_id')
+        return self.token_provider_api.revoke_token(token_id)
+
+    @controller.protected()
+    def validate_token(self, context):
+        token_id = context.get('subject_token_id')
+        include_catalog = 'nocatalog' not in context['query_string']
+        token_data = self.token_provider_api.validate_v3_token(
+            token_id)
+        if not include_catalog and 'catalog' in token_data['token']:
+            del token_data['token']['catalog']
+        return render_token_data_response(token_id, token_data)
+
+    @controller.protected()
+    def revocation_list(self, context, auth=None):
+        if not CONF.token.revoke_by_id:
+            raise exception.Gone()
+        tokens = self.token_provider_api.list_revoked_tokens()
+
+        for t in tokens:
+            expires = t['expires']
+            if not (expires and isinstance(expires, six.text_type)):
+                t['expires'] = timeutils.isotime(expires)
+        data = {'revoked': tokens}
+        json_data = jsonutils.dumps(data)
+        signed_text = cms.cms_sign_text(json_data,
+                                        CONF.signing.certfile,
+                                        CONF.signing.keyfile)
+
+        return {'signed': signed_text}
+
+    def _combine_lists_uniquely(self, a, b):
+        # it's most likely that only one of these will be filled so avoid
+        # the combination if possible.
+        if a and b:
+            return {x['id']: x for x in a + b}.values()
+        else:
+            return a or b
+
+    @controller.protected()
+    def get_auth_projects(self, context):
+        auth_context = self.get_auth_context(context)
+
+        user_id = auth_context.get('user_id')
+        user_refs = []
+        if user_id:
+            try:
+                user_refs = self.assignment_api.list_projects_for_user(user_id)
+            except exception.UserNotFound:
+                # federated users have an id but they don't link to anything
+                pass
+
+        group_ids = auth_context.get('group_ids')
+        grp_refs = []
+        if group_ids:
+            grp_refs = self.assignment_api.list_projects_for_groups(group_ids)
+
+        refs = self._combine_lists_uniquely(user_refs, grp_refs)
+        return resource_controllers.ProjectV3.wrap_collection(context, refs)
+
+    @controller.protected()
+    def get_auth_domains(self, context):
+        auth_context = self.get_auth_context(context)
+
+        user_id = auth_context.get('user_id')
+        user_refs = []
+        if user_id:
+            try:
+                user_refs = self.assignment_api.list_domains_for_user(user_id)
+            except exception.UserNotFound:
+                # federated users have an id but they don't link to anything
+                pass
+
+        group_ids = auth_context.get('group_ids')
+        grp_refs = []
+        if group_ids:
+            grp_refs = self.assignment_api.list_domains_for_groups(group_ids)
+
+        refs = self._combine_lists_uniquely(user_refs, grp_refs)
+        return resource_controllers.DomainV3.wrap_collection(context, refs)
+
+    @controller.protected()
+    def get_auth_catalog(self, context):
+        auth_context = self.get_auth_context(context)
+        user_id = auth_context.get('user_id')
+        project_id = auth_context.get('project_id')
+
+        if not project_id:
+            raise exception.Forbidden(
+                _('A project-scoped token is required to produce a service '
+                  'catalog.'))
+
+        # The V3Controller base methods mostly assume that you're returning
+        # either a collection or a single element from a collection, neither of
+        # which apply to the catalog. Because this is a special case, this
+        # re-implements a tiny bit of work done by the base controller (such as
+        # self-referential link building) to avoid overriding or refactoring
+        # several private methods.
+        return {
+            'catalog': self.catalog_api.get_v3_catalog(user_id, project_id),
+            'links': {'self': self.base_url(context, path='auth/catalog')}
+        }
+
+
+# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
+# for now.
+def render_token_data_response(token_id, token_data, created=False):
+    """Render token data HTTP response.
+
+    Stash token ID into the X-Subject-Token header.
+
+    """
+    headers = [('X-Subject-Token', token_id)]
+
+    if created:
+        status = (201, 'Created')
+    else:
+        status = (200, 'OK')
+
+    return wsgi.render_response(body=token_data,
+                                status=status, headers=headers)
diff --git a/keystone-moon/keystone/auth/core.py b/keystone-moon/keystone/auth/core.py
new file mode 100644 (file)
index 0000000..9da2c12
--- /dev/null
@@ -0,0 +1,94 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+import six
+
+from keystone import exception
+
+
+@six.add_metaclass(abc.ABCMeta)
+class AuthMethodHandler(object):
+    """Abstract base class for an authentication plugin."""
+
+    def __init__(self):
+        pass
+
+    @abc.abstractmethod
+    def authenticate(self, context, auth_payload, auth_context):
+        """Authenticate user and return an authentication context.
+
+        :param context: keystone's request context
+        :param auth_payload: the content of the authentication for a given
+                             method
+        :param auth_context: user authentication context, a dictionary shared
+                             by all plugins. It contains "method_names" and
+                             "extras" by default. "method_names" is a list and
+                             "extras" is a dictionary.
+
+        If successful, the plugin must set ``user_id`` in ``auth_context``.
+        ``method_names`` is used to convey any additional authentication
+        methods: for example, if authentication is for re-scoping, the
+        plugin must append the previous method names to ``method_names``.
+        A plugin may also add any additional information to ``extras``;
+        anything in ``extras`` will be conveyed in the token's ``extras``
+        attribute. Here's an example of ``auth_context`` on successful
+        authentication::
+
+            {
+                "extras": {},
+                "methods": [
+                    "password",
+                    "token"
+                ],
+                "user_id": "abc123"
+            }
+
+        Plugins are invoked in the order in which they are specified in the
+        ``methods`` attribute of the ``identity`` object. For example,
+        ``custom-plugin`` is invoked before ``password``, which is invoked
+        before ``token`` in the following authentication request::
+
+            {
+                "auth": {
+                    "identity": {
+                        "custom-plugin": {
+                            "custom-data": "sdfdfsfsfsdfsf"
+                        },
+                        "methods": [
+                            "custom-plugin",
+                            "password",
+                            "token"
+                        ],
+                        "password": {
+                            "user": {
+                                "id": "s23sfad1",
+                                "password": "secrete"
+                            }
+                        },
+                        "token": {
+                            "id": "sdfafasdfsfasfasdfds"
+                        }
+                    }
+                }
+            }
+
+        :returns: None if authentication is successful.
+                  Authentication payload in the form of a dictionary for the
+                  next authentication step if this is a multi-step
+                  authentication.
+        :raises: exception.Unauthorized for authentication failure
+        """
+        raise exception.Unauthorized()
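+
+
+# Illustrative sketch (an assumption, not shipped code) of a minimal plugin
+# implementing the contract above:
+#
+#   class Example(AuthMethodHandler):
+#       method = 'example'
+#
+#       def authenticate(self, context, auth_payload, auth_context):
+#           auth_context['user_id'] = auth_payload['user_id']
+#           return None  # success; no further authentication step needed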
diff --git a/keystone-moon/keystone/auth/plugins/__init__.py b/keystone-moon/keystone/auth/plugins/__init__.py
new file mode 100644 (file)
index 0000000..5da5470
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2015 CERN
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.auth.plugins.core import *  # noqa
diff --git a/keystone-moon/keystone/auth/plugins/core.py b/keystone-moon/keystone/auth/plugins/core.py
new file mode 100644 (file)
index 0000000..96a5ecf
--- /dev/null
@@ -0,0 +1,186 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone import exception
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+def construct_method_map_from_config():
+    """Determine authentication method types for deployment.
+
+    :returns: a dictionary containing the methods and their indexes
+
+    """
+    method_map = dict()
+    method_index = 1
+    for method in CONF.auth.methods:
+        method_map[method_index] = method
+        method_index = method_index * 2
+
+    return method_map
+
+
+def convert_method_list_to_integer(methods):
+    """Convert the method type(s) to an integer.
+
+    :param methods: a list of method names
+    :returns: an integer representing the methods
+
+    """
+    method_map = construct_method_map_from_config()
+
+    method_ints = []
+    for method in methods:
+        for k, v in six.iteritems(method_map):
+            if v == method:
+                method_ints.append(k)
+    return sum(method_ints)
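+
+
+# Illustrative example (assuming CONF.auth.methods = ['password', 'token'],
+# which yields the method map {1: 'password', 2: 'token'}):
+#   convert_method_list_to_integer(['password'])          -> 1
+#   convert_method_list_to_integer(['token'])             -> 2
+#   convert_method_list_to_integer(['password', 'token']) -> 3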
+
+
+def convert_integer_to_method_list(method_int):
+    """Convert an integer to a list of methods.
+
+    :param method_int: an integer representing methods
+    :returns: a corresponding list of methods
+
+    """
+    # If the method_int is 0 then no methods were used so return an empty
+    # method list
+    if method_int == 0:
+        return []
+
+    method_map = construct_method_map_from_config()
+    method_ints = []
+    for k, v in six.iteritems(method_map):
+        method_ints.append(k)
+    method_ints.sort(reverse=True)
+
+    confirmed_methods = []
+    for m_int in method_ints:
+        # (lbragstad): By dividing the method_int by each key in the
+        # method_map, we know if the division results in an integer of 1, that
+        # key was used in the construction of the total sum of the method_int.
+        # In that case, we should confirm the key value and store it so we can
+        # look it up later. Then we should take the remainder of what is
+        # confirmed and the method_int and continue the process. In the end, we
+        # should have a list of integers that correspond to indexes in our
+        # method_map and we can reinflate the methods that the original
+        # method_int represents.
+        # Use floor division so the check behaves the same on py2 and py3.
+        if (method_int // m_int) == 1:
+            confirmed_methods.append(m_int)
+            method_int = method_int - m_int
+
+    methods = []
+    for method in confirmed_methods:
+        methods.append(method_map[method])
+
+    return methods
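+
+
+# Illustrative walk-through (same assumed map {1: 'password', 2: 'token'}):
+# convert_integer_to_method_list(3) checks 2 first (3 // 2 == 1, remainder
+# 1), then 1 (1 // 1 == 1), recovering ['token', 'password'].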
+
+
+@dependency.requires('identity_api', 'resource_api')
+class UserAuthInfo(object):
+
+    @staticmethod
+    def create(auth_payload, method_name):
+        user_auth_info = UserAuthInfo()
+        # Set METHOD_NAME before validating so that validation errors can
+        # reference the correct target method.
+        user_auth_info.METHOD_NAME = method_name
+        user_auth_info._validate_and_normalize_auth_data(auth_payload)
+        return user_auth_info
+
+    def __init__(self):
+        self.user_id = None
+        self.password = None
+        self.user_ref = None
+        self.METHOD_NAME = None
+
+    def _assert_domain_is_enabled(self, domain_ref):
+        try:
+            self.resource_api.assert_domain_enabled(
+                domain_id=domain_ref['id'],
+                domain=domain_ref)
+        except AssertionError as e:
+            LOG.warning(six.text_type(e))
+            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
+                        sys.exc_info()[2])
+
+    def _assert_user_is_enabled(self, user_ref):
+        try:
+            self.identity_api.assert_user_enabled(
+                user_id=user_ref['id'],
+                user=user_ref)
+        except AssertionError as e:
+            LOG.warning(six.text_type(e))
+            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
+                        sys.exc_info()[2])
+
+    def _lookup_domain(self, domain_info):
+        domain_id = domain_info.get('id')
+        domain_name = domain_info.get('name')
+        domain_ref = None
+        if not domain_id and not domain_name:
+            raise exception.ValidationError(attribute='id or name',
+                                            target='domain')
+        try:
+            if domain_name:
+                domain_ref = self.resource_api.get_domain_by_name(
+                    domain_name)
+            else:
+                domain_ref = self.resource_api.get_domain(domain_id)
+        except exception.DomainNotFound as e:
+            LOG.exception(six.text_type(e))
+            raise exception.Unauthorized(e)
+        self._assert_domain_is_enabled(domain_ref)
+        return domain_ref
+
+    def _validate_and_normalize_auth_data(self, auth_payload):
+        if 'user' not in auth_payload:
+            raise exception.ValidationError(attribute='user',
+                                            target=self.METHOD_NAME)
+        user_info = auth_payload['user']
+        user_id = user_info.get('id')
+        user_name = user_info.get('name')
+        user_ref = None
+        if not user_id and not user_name:
+            raise exception.ValidationError(attribute='id or name',
+                                            target='user')
+        self.password = user_info.get('password')
+        try:
+            if user_name:
+                if 'domain' not in user_info:
+                    raise exception.ValidationError(attribute='domain',
+                                                    target='user')
+                domain_ref = self._lookup_domain(user_info['domain'])
+                user_ref = self.identity_api.get_user_by_name(
+                    user_name, domain_ref['id'])
+            else:
+                user_ref = self.identity_api.get_user(user_id)
+                domain_ref = self.resource_api.get_domain(
+                    user_ref['domain_id'])
+                self._assert_domain_is_enabled(domain_ref)
+        except exception.UserNotFound as e:
+            LOG.exception(six.text_type(e))
+            raise exception.Unauthorized(e)
+        self._assert_user_is_enabled(user_ref)
+        self.user_ref = user_ref
+        self.user_id = user_ref['id']
+        self.domain_id = domain_ref['id']
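+
+
+# Illustrative auth payload consumed by UserAuthInfo.create() (an assumed
+# example matching the validation above, not shipped code):
+#
+#   {'user': {'name': 'alice',
+#             'domain': {'id': 'default'},
+#             'password': 'secret'}}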
diff --git a/keystone-moon/keystone/auth/plugins/external.py b/keystone-moon/keystone/auth/plugins/external.py
new file mode 100644 (file)
index 0000000..2322649
--- /dev/null
@@ -0,0 +1,186 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone External Authentication Plugins"""
+
+import abc
+
+from oslo_config import cfg
+import six
+
+from keystone import auth
+from keystone.common import dependency
+from keystone import exception
+from keystone.i18n import _
+from keystone.openstack.common import versionutils
+
+
+CONF = cfg.CONF
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Base(auth.AuthMethodHandler):
+
+    method = 'external'
+
+    def authenticate(self, context, auth_info, auth_context):
+        """Use REMOTE_USER to look up the user in the identity backend.
+
+        auth_context is an in-out variable that will be updated with the
+        user_id of the user resolved from the REMOTE_USER environment
+        variable.
+        """
+        try:
+            REMOTE_USER = context['environment']['REMOTE_USER']
+        except KeyError:
+            msg = _('No authenticated user')
+            raise exception.Unauthorized(msg)
+        try:
+            user_ref = self._authenticate(REMOTE_USER, context)
+            auth_context['user_id'] = user_ref['id']
+            if ('kerberos' in CONF.token.bind and
+                (context['environment'].get('AUTH_TYPE', '').lower()
+                 == 'negotiate')):
+                auth_context['bind']['kerberos'] = user_ref['name']
+        except Exception:
+            msg = _('Unable to look up user %s') % REMOTE_USER
+            raise exception.Unauthorized(msg)
+
+    @abc.abstractmethod
+    def _authenticate(self, remote_user, context):
+        """Look up the user in the identity backend.
+
+        Return user_ref
+        """
+        pass
+
+
+@dependency.requires('identity_api')
+class DefaultDomain(Base):
+    def _authenticate(self, remote_user, context):
+        """Use remote_user to look up the user in the identity backend."""
+        domain_id = CONF.identity.default_domain_id
+        user_ref = self.identity_api.get_user_by_name(remote_user, domain_id)
+        return user_ref
+
+
+@dependency.requires('identity_api', 'resource_api')
+class Domain(Base):
+    def _authenticate(self, remote_user, context):
+        """Use remote_user to look up the user in the identity backend.
+
+        The domain will be extracted from the REMOTE_DOMAIN environment
+        variable if present. If not, the default domain will be used.
+        """
+
+        username = remote_user
+        try:
+            domain_name = context['environment']['REMOTE_DOMAIN']
+        except KeyError:
+            domain_id = CONF.identity.default_domain_id
+        else:
+            domain_ref = self.resource_api.get_domain_by_name(domain_name)
+            domain_id = domain_ref['id']
+
+        user_ref = self.identity_api.get_user_by_name(username, domain_id)
+        return user_ref
+
+
+@dependency.requires('assignment_api', 'identity_api')
+class KerberosDomain(Domain):
+    """Allows `kerberos` as a method."""
+    method = 'kerberos'
+
+    def _authenticate(self, remote_user, context):
+        auth_type = context['environment'].get('AUTH_TYPE')
+        if auth_type != 'Negotiate':
+            raise exception.Unauthorized(_("auth_type is not Negotiate"))
+        return super(KerberosDomain, self)._authenticate(remote_user, context)
+
+
+class ExternalDefault(DefaultDomain):
+    """Deprecated. Please use keystone.auth.external.DefaultDomain instead."""
+
+    @versionutils.deprecated(
+        as_of=versionutils.deprecated.ICEHOUSE,
+        in_favor_of='keystone.auth.external.DefaultDomain',
+        remove_in=+1)
+    def __init__(self):
+        super(ExternalDefault, self).__init__()
+
+
+class ExternalDomain(Domain):
+    """Deprecated. Please use keystone.auth.external.Domain instead."""
+
+    @versionutils.deprecated(
+        as_of=versionutils.deprecated.ICEHOUSE,
+        in_favor_of='keystone.auth.external.Domain',
+        remove_in=+1)
+    def __init__(self):
+        super(ExternalDomain, self).__init__()
+
+
+@dependency.requires('identity_api')
+class LegacyDefaultDomain(Base):
+    """Deprecated. Please use keystone.auth.external.DefaultDomain instead.
+
+    This plugin exists to provide compatibility for the unintended behavior
+    described here: https://bugs.launchpad.net/keystone/+bug/1253484
+
+    """
+
+    @versionutils.deprecated(
+        as_of=versionutils.deprecated.ICEHOUSE,
+        in_favor_of='keystone.auth.external.DefaultDomain',
+        remove_in=+1)
+    def __init__(self):
+        super(LegacyDefaultDomain, self).__init__()
+
+    def _authenticate(self, remote_user, context):
+        """Use remote_user to look up the user in the identity backend."""
+        # NOTE(dolph): this unintentionally discards half the REMOTE_USER value
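+        # e.g. REMOTE_USER 'alice@Default' yields username 'alice';
+        # 'Default' is silently discarded (illustrative values).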
+        names = remote_user.split('@')
+        username = names.pop(0)
+        domain_id = CONF.identity.default_domain_id
+        user_ref = self.identity_api.get_user_by_name(username, domain_id)
+        return user_ref
+
+
+@dependency.requires('identity_api', 'resource_api')
+class LegacyDomain(Base):
+    """Deprecated. Please use keystone.auth.external.Domain instead."""
+
+    @versionutils.deprecated(
+        as_of=versionutils.deprecated.ICEHOUSE,
+        in_favor_of='keystone.auth.external.Domain',
+        remove_in=+1)
+    def __init__(self):
+        super(LegacyDomain, self).__init__()
+
+    def _authenticate(self, remote_user, context):
+        """Use remote_user to look up the user in the identity backend.
+
+        If remote_user contains an `@` assume that the substring before the
+        rightmost `@` is the username, and the substring after the @ is the
+        domain name.
+        """
+        names = remote_user.rsplit('@', 1)
+        username = names.pop(0)
+        if names:
+            domain_name = names[0]
+            domain_ref = self.resource_api.get_domain_by_name(domain_name)
+            domain_id = domain_ref['id']
+        else:
+            domain_id = CONF.identity.default_domain_id
+        user_ref = self.identity_api.get_user_by_name(username, domain_id)
+        return user_ref
diff --git a/keystone-moon/keystone/auth/plugins/mapped.py b/keystone-moon/keystone/auth/plugins/mapped.py
new file mode 100644 (file)
index 0000000..abf4448
--- /dev/null
@@ -0,0 +1,252 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from oslo_log import log
+from oslo_serialization import jsonutils
+from pycadf import cadftaxonomy as taxonomy
+from six.moves.urllib import parse
+
+from keystone import auth
+from keystone.auth import plugins as auth_plugins
+from keystone.common import dependency
+from keystone.contrib import federation
+from keystone.contrib.federation import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import token_model
+from keystone import notifications
+
+
+LOG = log.getLogger(__name__)
+
+METHOD_NAME = 'mapped'
+
+
+@dependency.requires('assignment_api', 'federation_api', 'identity_api',
+                     'token_provider_api')
+class Mapped(auth.AuthMethodHandler):
+
+    def _get_token_ref(self, auth_payload):
+        token_id = auth_payload['id']
+        response = self.token_provider_api.validate_token(token_id)
+        return token_model.KeystoneToken(token_id=token_id,
+                                         token_data=response)
+
+    def authenticate(self, context, auth_payload, auth_context):
+        """Authenticate mapped user and return an authentication context.
+
+        :param context: keystone's request context
+        :param auth_payload: the content of the authentication for a
+                             given method
+        :param auth_context: user authentication context, a dictionary
+                             shared by all plugins.
+
+        In addition to ``user_id`` in ``auth_context``, this plugin sets
+        ``group_ids``, ``OS-FEDERATION:identity_provider``, and
+        ``OS-FEDERATION:protocol``.
+
+        """
+
+        if 'id' in auth_payload:
+            token_ref = self._get_token_ref(auth_payload)
+            handle_scoped_token(context, auth_payload, auth_context, token_ref,
+                                self.federation_api,
+                                self.identity_api,
+                                self.token_provider_api)
+        else:
+            handle_unscoped_token(context, auth_payload, auth_context,
+                                  self.assignment_api, self.federation_api,
+                                  self.identity_api)
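+        # In both branches auth_context gains at least 'user_id'; federated
+        # flows also end up with (illustratively):
+        #     {'group_ids': [...],
+        #      'OS-FEDERATION:identity_provider': <idp_id>,
+        #      'OS-FEDERATION:protocol': <protocol_id>}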
+
+
+def handle_scoped_token(context, auth_payload, auth_context, token_ref,
+                        federation_api, identity_api, token_provider_api):
+    utils.validate_expiration(token_ref)
+    token_audit_id = token_ref.audit_id
+    identity_provider = token_ref.federation_idp_id
+    protocol = token_ref.federation_protocol_id
+    user_id = token_ref.user_id
+    group_ids = token_ref.federation_group_ids
+    send_notification = functools.partial(
+        notifications.send_saml_audit_notification, 'authenticate',
+        context, user_id, group_ids, identity_provider, protocol,
+        token_audit_id)
+
+    utils.assert_enabled_identity_provider(federation_api, identity_provider)
+
+    try:
+        mapping = federation_api.get_mapping_from_idp_and_protocol(
+            identity_provider, protocol)
+        utils.validate_groups(group_ids, mapping['id'], identity_api)
+
+    except Exception:
+        # NOTE(topol): Diaper defense to catch any exception, so we can
+        # send off a failed-authentication notification; the exception is
+        # re-raised after the notification is sent.
+        send_notification(taxonomy.OUTCOME_FAILURE)
+        raise
+    else:
+        send_notification(taxonomy.OUTCOME_SUCCESS)
+
+    auth_context['user_id'] = user_id
+    auth_context['group_ids'] = group_ids
+    auth_context[federation.IDENTITY_PROVIDER] = identity_provider
+    auth_context[federation.PROTOCOL] = protocol
+
+
+def handle_unscoped_token(context, auth_payload, auth_context,
+                          assignment_api, federation_api, identity_api):
+
+    def is_ephemeral_user(mapped_properties):
+        return mapped_properties['user']['type'] == utils.UserType.EPHEMERAL
+
+    def build_ephemeral_user_context(auth_context, user, mapped_properties,
+                                     identity_provider, protocol):
+        auth_context['user_id'] = user['id']
+        auth_context['group_ids'] = mapped_properties['group_ids']
+        auth_context[federation.IDENTITY_PROVIDER] = identity_provider
+        auth_context[federation.PROTOCOL] = protocol
+
+    def build_local_user_context(auth_context, mapped_properties):
+        user_info = auth_plugins.UserAuthInfo.create(mapped_properties,
+                                                     METHOD_NAME)
+        auth_context['user_id'] = user_info.user_id
+
+    assertion = extract_assertion_data(context)
+    identity_provider = auth_payload['identity_provider']
+    protocol = auth_payload['protocol']
+
+    utils.assert_enabled_identity_provider(federation_api, identity_provider)
+
+    group_ids = None
+    # NOTE(topol): The user is coming in from an IdP with a SAML assertion
+    # instead of from a token, so we set token_id to None
+    token_id = None
+    # NOTE(marek-denis): This variable is set to None and may end up being
+    # used in the CADF notification, in which case the operation will not be
+    # mapped to any user (not even an ephemeral one).
+    user_id = None
+
+    try:
+        mapped_properties = apply_mapping_filter(
+            identity_provider, protocol, assertion, assignment_api,
+            federation_api, identity_api)
+
+        if is_ephemeral_user(mapped_properties):
+            user = setup_username(context, mapped_properties)
+            user_id = user['id']
+            group_ids = mapped_properties['group_ids']
+            mapping = federation_api.get_mapping_from_idp_and_protocol(
+                identity_provider, protocol)
+            utils.validate_groups_cardinality(group_ids, mapping['id'])
+            build_ephemeral_user_context(auth_context, user,
+                                         mapped_properties,
+                                         identity_provider, protocol)
+        else:
+            build_local_user_context(auth_context, mapped_properties)
+
+    except Exception:
+        # NOTE(topol): Diaper defense to catch any exception, so we can
+        # send off a failed-authentication notification; the exception is
+        # re-raised after the notification is sent.
+        outcome = taxonomy.OUTCOME_FAILURE
+        notifications.send_saml_audit_notification('authenticate', context,
+                                                   user_id, group_ids,
+                                                   identity_provider,
+                                                   protocol, token_id,
+                                                   outcome)
+        raise
+    else:
+        outcome = taxonomy.OUTCOME_SUCCESS
+        notifications.send_saml_audit_notification('authenticate', context,
+                                                   user_id, group_ids,
+                                                   identity_provider,
+                                                   protocol, token_id,
+                                                   outcome)
+
+
+def extract_assertion_data(context):
+    assertion = dict(utils.get_assertion_params_from_env(context))
+    return assertion
+
+
+def apply_mapping_filter(identity_provider, protocol, assertion,
+                         assignment_api, federation_api, identity_api):
+    idp = federation_api.get_idp(identity_provider)
+    utils.validate_idp(idp, assertion)
+    mapping = federation_api.get_mapping_from_idp_and_protocol(
+        identity_provider, protocol)
+    rules = jsonutils.loads(mapping['rules'])
+    LOG.debug('using the following rules: %s', rules)
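+    # The rules follow the federation mapping format, e.g. (illustrative):
+    #     [{"local": [{"user": {"name": "{0}"}}],
+    #       "remote": [{"type": "REMOTE_USER"}]}]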
+    rule_processor = utils.RuleProcessor(rules)
+    mapped_properties = rule_processor.process(assertion)
+
+    # NOTE(marek-denis): We update group_ids only here to avoid fetching
+    # groups identified by name/domain twice.
+    # NOTE(marek-denis): Groups are translated from name/domain to their
+    # corresponding ids in the auth plugin, as we need to know which
+    # ``mapping_id`` was used, as well as the identity_api and
+    # assignment_api objects.
+    group_ids = mapped_properties['group_ids']
+    utils.validate_groups_in_backend(group_ids,
+                                     mapping['id'],
+                                     identity_api)
+    group_ids.extend(
+        utils.transform_to_group_ids(
+            mapped_properties['group_names'], mapping['id'],
+            identity_api, assignment_api))
+    mapped_properties['group_ids'] = list(set(group_ids))
+    return mapped_properties
+
+
+def setup_username(context, mapped_properties):
+    """Setup federated username.
+
+    Function covers all the cases for properly setting user id, a primary
+    identifier for identity objects. Initial version of the mapping engine
+    assumed user is identified by ``name`` and his ``id`` is built from the
+    name. We, however need to be able to accept local rules that identify user
+    by either id or name/domain.
+
+    The following use-cases are covered:
+
+    1) If neither user_name nor user_id is set raise exception.Unauthorized
+    2) If user_id is set and user_name not, set user_name equal to user_id
+    3) If user_id is not set and user_name is, set user_id as url safe version
+       of user_name.
+
+    :param context: authentication context
+    :param mapped_properties: Properties issued by a RuleProcessor.
+    :type: dictionary
+
+    :raises: exception.Unauthorized
+    :returns: dictionary with user identification
+    :rtype: dict
+
+    """
+    user = mapped_properties['user']
+
+    user_id = user.get('id')
+    user_name = user.get('name') or context['environment'].get('REMOTE_USER')
+
+    if not any([user_id, user_name]):
+        raise exception.Unauthorized(_("Could not map user"))
+
+    elif not user_name:
+        user['name'] = user_id
+
+    elif not user_id:
+        user['id'] = parse.quote(user_name)
+
+    return user
diff --git a/keystone-moon/keystone/auth/plugins/oauth1.py b/keystone-moon/keystone/auth/plugins/oauth1.py
new file mode 100644 (file)
index 0000000..2f1cc2f
--- /dev/null
@@ -0,0 +1,75 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+from oslo_utils import timeutils
+
+from keystone import auth
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.contrib.oauth1 import core as oauth
+from keystone.contrib.oauth1 import validator
+from keystone import exception
+from keystone.i18n import _
+
+
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('oauth_api')
+class OAuth(auth.AuthMethodHandler):
+
+    method = 'oauth1'
+
+    def authenticate(self, context, auth_info, auth_context):
+        """Turn a signed request with an access key into a keystone token."""
+
+        if not self.oauth_api:
+            raise exception.Unauthorized(_('%s not supported') % self.method)
+
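+        # The signed request carries its OAuth1 parameters in the standard
+        # Authorization header, e.g. (illustrative values):
+        #     Authorization: OAuth oauth_consumer_key="...",
+        #         oauth_token="...", oauth_signature_method="HMAC-SHA1",
+        #         oauth_signature="..."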
+        headers = context['headers']
+        oauth_headers = oauth.get_oauth_headers(headers)
+        access_token_id = oauth_headers.get('oauth_token')
+
+        if not access_token_id:
+            raise exception.ValidationError(
+                attribute='oauth_token', target='request')
+
+        acc_token = self.oauth_api.get_access_token(access_token_id)
+
+        expires_at = acc_token['expires_at']
+        if expires_at:
+            now = timeutils.utcnow()
+            expires = timeutils.normalize_time(
+                timeutils.parse_isotime(expires_at))
+            if now > expires:
+                raise exception.Unauthorized(_('Access token is expired'))
+
+        url = controller.V3Controller.base_url(context, context['path'])
+        access_verifier = oauth.ResourceEndpoint(
+            request_validator=validator.OAuthValidator(),
+            token_generator=oauth.token_generator)
+        result, request = access_verifier.validate_protected_resource_request(
+            url,
+            http_method='POST',
+            body=context['query_string'],
+            headers=headers,
+            realms=None
+        )
+        if not result:
+            msg = _('Could not validate the access token')
+            raise exception.Unauthorized(msg)
+        auth_context['user_id'] = acc_token['authorizing_user_id']
+        auth_context['access_token_id'] = access_token_id
+        auth_context['project_id'] = acc_token['project_id']
diff --git a/keystone-moon/keystone/auth/plugins/password.py b/keystone-moon/keystone/auth/plugins/password.py
new file mode 100644 (file)
index 0000000..c577044
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+
+from keystone import auth
+from keystone.auth import plugins as auth_plugins
+from keystone.common import dependency
+from keystone import exception
+from keystone.i18n import _
+
+METHOD_NAME = 'password'
+
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('identity_api')
+class Password(auth.AuthMethodHandler):
+
+    method = METHOD_NAME
+
+    def authenticate(self, context, auth_payload, auth_context):
+        """Try to authenticate against the identity backend."""
+        user_info = auth_plugins.UserAuthInfo.create(auth_payload, self.method)
+
+        # FIXME(gyee): identity.authenticate() can use some refactoring since
+        # all we care is password matches
+        try:
+            self.identity_api.authenticate(
+                context,
+                user_id=user_info.user_id,
+                password=user_info.password)
+        except AssertionError:
+            # authentication failed because of invalid username or password
+            msg = _('Invalid username or password')
+            raise exception.Unauthorized(msg)
+
+        auth_context['user_id'] = user_info.user_id
diff --git a/keystone-moon/keystone/auth/plugins/saml2.py b/keystone-moon/keystone/auth/plugins/saml2.py
new file mode 100644 (file)
index 0000000..744f26a
--- /dev/null
@@ -0,0 +1,27 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.auth.plugins import mapped
+
+""" Provide an entry point to authenticate with SAML2
+
+This plugin subclasses mapped.Mapped, and may be specified in keystone.conf:
+
+  [auth]
+  methods = external,password,token,saml2
+  saml2 = keystone.auth.plugins.mapped.Mapped
+"""
+
+
+class Saml2(mapped.Mapped):
+
+    method = 'saml2'
diff --git a/keystone-moon/keystone/auth/plugins/token.py b/keystone-moon/keystone/auth/plugins/token.py
new file mode 100644 (file)
index 0000000..5ca0b25
--- /dev/null
@@ -0,0 +1,99 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone import auth
+from keystone.auth.plugins import mapped
+from keystone.common import dependency
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import token_model
+
+
+LOG = log.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+@dependency.requires('federation_api', 'identity_api', 'token_provider_api')
+class Token(auth.AuthMethodHandler):
+
+    method = 'token'
+
+    def _get_token_ref(self, auth_payload):
+        token_id = auth_payload['id']
+        response = self.token_provider_api.validate_token(token_id)
+        return token_model.KeystoneToken(token_id=token_id,
+                                         token_data=response)
+
+    def authenticate(self, context, auth_payload, user_context):
+        if 'id' not in auth_payload:
+            raise exception.ValidationError(attribute='id',
+                                            target=self.method)
+        token_ref = self._get_token_ref(auth_payload)
+        if token_ref.is_federated_user and self.federation_api:
+            mapped.handle_scoped_token(
+                context, auth_payload, user_context, token_ref,
+                self.federation_api, self.identity_api,
+                self.token_provider_api)
+        else:
+            token_authenticate(context, auth_payload, user_context, token_ref)
+
+
+def token_authenticate(context, auth_payload, user_context, token_ref):
+    try:
+
+        # Do not allow tokens used for delegation to
+        # create another token, or perform any changes of
+        # state in Keystone. To do so is to invite elevation of
+        # privilege attacks
+
+        if token_ref.oauth_scoped or token_ref.trust_scoped:
+            raise exception.Forbidden()
+
+        if not CONF.token.allow_rescope_scoped_token:
+            # Do not allow conversion from scoped tokens.
+            if token_ref.project_scoped or token_ref.domain_scoped:
+                raise exception.Forbidden(action=_("rescope a scoped token"))
+
+        wsgi.validate_token_bind(context, token_ref)
+
+        # New tokens maintain the audit_id of the original token in the
+        # chain (if possible) as the second element in the audit data
+        # structure. Look for the last element in the audit data structure
+        # which will be either the audit_id of the token (in the case of
+        # a token that has not been rescoped) or the audit_chain id (in
+        # the case of a token that has been rescoped).
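+        # e.g. audit_ids == ['VcxU...'] for an original token, or
+        # ['VcxU...', 'qNUT...'] once rescoped, where the final element is
+        # the audit_chain_id (illustrative values).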
+        try:
+            token_audit_id = token_ref.get('audit_ids', [])[-1]
+        except IndexError:
+            # NOTE(morganfainberg): In the case this is a token that was
+            # issued prior to audit id existing, the chain is not tracked.
+            token_audit_id = None
+
+        user_context.setdefault('expires_at', token_ref.expires)
+        user_context['audit_id'] = token_audit_id
+        user_context.setdefault('user_id', token_ref.user_id)
+        # TODO(morganfainberg): determine if token 'extras' can be removed
+        # from the user_context.
+        user_context['extras'].update(token_ref.get('extras', {}))
+        user_context['method_names'].extend(token_ref.methods)
+
+    except AssertionError as e:
+        LOG.error(six.text_type(e))
+        raise exception.Unauthorized(e)
diff --git a/keystone-moon/keystone/auth/routers.py b/keystone-moon/keystone/auth/routers.py
new file mode 100644 (file)
index 0000000..c7a525c
--- /dev/null
@@ -0,0 +1,57 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.auth import controllers
+from keystone.common import json_home
+from keystone.common import wsgi
+
+
+class Routers(wsgi.RoutersBase):
+
+    def append_v3_routers(self, mapper, routers):
+        auth_controller = controllers.Auth()
+
+        self._add_resource(
+            mapper, auth_controller,
+            path='/auth/tokens',
+            get_action='validate_token',
+            head_action='check_token',
+            post_action='authenticate_for_token',
+            delete_action='revoke_token',
+            rel=json_home.build_v3_resource_relation('auth_tokens'))
+
+        self._add_resource(
+            mapper, auth_controller,
+            path='/auth/tokens/OS-PKI/revoked',
+            get_action='revocation_list',
+            rel=json_home.build_v3_extension_resource_relation(
+                'OS-PKI', '1.0', 'revocations'))
+
+        self._add_resource(
+            mapper, auth_controller,
+            path='/auth/catalog',
+            get_action='get_auth_catalog',
+            rel=json_home.build_v3_resource_relation('auth_catalog'))
+
+        self._add_resource(
+            mapper, auth_controller,
+            path='/auth/projects',
+            get_action='get_auth_projects',
+            rel=json_home.build_v3_resource_relation('auth_projects'))
+
+        self._add_resource(
+            mapper, auth_controller,
+            path='/auth/domains',
+            get_action='get_auth_domains',
+            rel=json_home.build_v3_resource_relation('auth_domains'))
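+
+# The resources above expose the following v3 routes (illustrative):
+#     GET/HEAD/POST/DELETE /v3/auth/tokens
+#     GET /v3/auth/tokens/OS-PKI/revoked
+#     GET /v3/auth/catalog
+#     GET /v3/auth/projects
+#     GET /v3/auth/domains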
diff --git a/keystone-moon/keystone/backends.py b/keystone-moon/keystone/backends.py
new file mode 100644 (file)
index 0000000..3a10675
--- /dev/null
@@ -0,0 +1,66 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone import assignment
+from keystone import auth
+from keystone import catalog
+from keystone.common import cache
+from keystone.contrib import endpoint_filter
+from keystone.contrib import endpoint_policy
+from keystone.contrib import federation
+from keystone.contrib import oauth1
+from keystone.contrib import revoke
+from keystone import credential
+from keystone import identity
+from keystone import policy
+from keystone import resource
+from keystone import token
+from keystone import trust
+# from keystone.contrib import moon
+
+
+def load_backends():
+
+    # Configure and build the cache
+    cache.configure_cache_region(cache.REGION)
+
+    # Ensure that the identity driver is created before the assignment manager.
+    # The default assignment driver is determined by the identity driver, so
+    # the identity driver must be available to the assignment manager.
+    _IDENTITY_API = identity.Manager()
+
+    DRIVERS = dict(
+        assignment_api=assignment.Manager(),
+        catalog_api=catalog.Manager(),
+        credential_api=credential.Manager(),
+        domain_config_api=resource.DomainConfigManager(),
+        endpoint_filter_api=endpoint_filter.Manager(),
+        endpoint_policy_api=endpoint_policy.Manager(),
+        federation_api=federation.Manager(),
+        id_generator_api=identity.generator.Manager(),
+        id_mapping_api=identity.MappingManager(),
+        identity_api=_IDENTITY_API,
+        oauth_api=oauth1.Manager(),
+        policy_api=policy.Manager(),
+        resource_api=resource.Manager(),
+        revoke_api=revoke.Manager(),
+        role_api=assignment.RoleManager(),
+        token_api=token.persistence.Manager(),
+        trust_api=trust.Manager(),
+        token_provider_api=token.provider.Manager(),
+        # admin_api=moon.AdminManager(),
+        # authz_api=moon.AuthzManager()
+        )
+
+    auth.controllers.load_auth_methods()
+
+    return DRIVERS
diff --git a/keystone-moon/keystone/catalog/__init__.py b/keystone-moon/keystone/catalog/__init__.py
new file mode 100644 (file)
index 0000000..8d4d156
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.catalog import controllers  # noqa
+from keystone.catalog.core import *  # noqa
+from keystone.catalog import routers  # noqa
diff --git a/keystone-moon/keystone/catalog/backends/__init__.py b/keystone-moon/keystone/catalog/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/catalog/backends/kvs.py b/keystone-moon/keystone/catalog/backends/kvs.py
new file mode 100644 (file)
index 0000000..30a121d
--- /dev/null
@@ -0,0 +1,154 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from keystone import catalog
+from keystone.common import driver_hints
+from keystone.common import kvs
+
+
+class Catalog(kvs.Base, catalog.Driver):
+    # Public interface
+    def get_catalog(self, user_id, tenant_id):
+        return self.db.get('catalog-%s-%s' % (tenant_id, user_id))
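+
+    # The flat KVS key layout used throughout (illustrative):
+    #     region-<id>, service-<id>, endpoint-<id>  -> the ref dict
+    #     region_list, service_list, endpoint_list  -> list of ids
+    #     catalog-<tenant_id>-<user_id>             -> prebuilt catalog dict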
+
+    # region crud
+
+    def _delete_child_regions(self, region_id, root_region_id):
+        """Delete all child regions.
+
+        Recursively delete any region that has the supplied region
+        as its parent.
+        """
+        children = [r for r in self.list_regions(driver_hints.Hints())
+                    if r['parent_region_id'] == region_id]
+        for child in children:
+            if child['id'] == root_region_id:
+                # Hit a circular region hierarchy
+                return
+            self._delete_child_regions(child['id'], root_region_id)
+            self._delete_region(child['id'])
+
+    def _check_parent_region(self, region_ref):
+        """Raise a NotFound if the parent region does not exist.
+
+        If the region_ref has a specified parent_region_id, check that
+        the parent exists, otherwise, raise a NotFound.
+        """
+        parent_region_id = region_ref.get('parent_region_id')
+        if parent_region_id is not None:
+            # This will raise NotFound if the parent doesn't exist,
+            # which is the behavior we want.
+            self.get_region(parent_region_id)
+
+    def create_region(self, region):
+        region_id = region['id']
+        region.setdefault('parent_region_id')
+        self._check_parent_region(region)
+        self.db.set('region-%s' % region_id, region)
+        region_list = set(self.db.get('region_list', []))
+        region_list.add(region_id)
+        self.db.set('region_list', list(region_list))
+        return region
+
+    def list_regions(self, hints):
+        return [self.get_region(x) for x in self.db.get('region_list', [])]
+
+    def get_region(self, region_id):
+        return self.db.get('region-%s' % region_id)
+
+    def update_region(self, region_id, region):
+        self._check_parent_region(region)
+        old_region = self.get_region(region_id)
+        old_region.update(region)
+        self._ensure_no_circle_in_hierarchical_regions(old_region)
+        self.db.set('region-%s' % region_id, old_region)
+        return old_region
+
+    def _delete_region(self, region_id):
+        self.db.delete('region-%s' % region_id)
+        region_list = set(self.db.get('region_list', []))
+        region_list.remove(region_id)
+        self.db.set('region_list', list(region_list))
+
+    def delete_region(self, region_id):
+        self._delete_child_regions(region_id, region_id)
+        self._delete_region(region_id)
+
+    # service crud
+
+    def create_service(self, service_id, service):
+        self.db.set('service-%s' % service_id, service)
+        service_list = set(self.db.get('service_list', []))
+        service_list.add(service_id)
+        self.db.set('service_list', list(service_list))
+        return service
+
+    def list_services(self, hints):
+        return [self.get_service(x) for x in self.db.get('service_list', [])]
+
+    def get_service(self, service_id):
+        return self.db.get('service-%s' % service_id)
+
+    def update_service(self, service_id, service):
+        old_service = self.get_service(service_id)
+        old_service.update(service)
+        self.db.set('service-%s' % service_id, old_service)
+        return old_service
+
+    def delete_service(self, service_id):
+        # delete referencing endpoints
+        for endpoint_id in self.db.get('endpoint_list', []):
+            if self.get_endpoint(endpoint_id)['service_id'] == service_id:
+                self.delete_endpoint(endpoint_id)
+
+        self.db.delete('service-%s' % service_id)
+        service_list = set(self.db.get('service_list', []))
+        service_list.remove(service_id)
+        self.db.set('service_list', list(service_list))
+
+    # endpoint crud
+
+    def create_endpoint(self, endpoint_id, endpoint):
+        self.db.set('endpoint-%s' % endpoint_id, endpoint)
+        endpoint_list = set(self.db.get('endpoint_list', []))
+        endpoint_list.add(endpoint_id)
+        self.db.set('endpoint_list', list(endpoint_list))
+        return endpoint
+
+    def list_endpoints(self, hints):
+        return [self.get_endpoint(x) for x in self.db.get('endpoint_list', [])]
+
+    def get_endpoint(self, endpoint_id):
+        return self.db.get('endpoint-%s' % endpoint_id)
+
+    def update_endpoint(self, endpoint_id, endpoint):
+        if endpoint.get('region_id') is not None:
+            self.get_region(endpoint['region_id'])
+
+        old_endpoint = self.get_endpoint(endpoint_id)
+        old_endpoint.update(endpoint)
+        self.db.set('endpoint-%s' % endpoint_id, old_endpoint)
+        return old_endpoint
+
+    def delete_endpoint(self, endpoint_id):
+        self.db.delete('endpoint-%s' % endpoint_id)
+        endpoint_list = set(self.db.get('endpoint_list', []))
+        endpoint_list.remove(endpoint_id)
+        self.db.set('endpoint_list', list(endpoint_list))
+
+    # Private interface
+    def _create_catalog(self, user_id, tenant_id, data):
+        self.db.set('catalog-%s-%s' % (tenant_id, user_id), data)
+        return data
diff --git a/keystone-moon/keystone/catalog/backends/sql.py b/keystone-moon/keystone/catalog/backends/sql.py
new file mode 100644 (file)
index 0000000..8ab8230
--- /dev/null
@@ -0,0 +1,337 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2012 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+
+from oslo_config import cfg
+import six
+import sqlalchemy
+from sqlalchemy.sql import true
+
+from keystone import catalog
+from keystone.catalog import core
+from keystone.common import sql
+from keystone import exception
+
+
+CONF = cfg.CONF
+
+
+class Region(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'region'
+    attributes = ['id', 'description', 'parent_region_id']
+    id = sql.Column(sql.String(255), primary_key=True)
+    description = sql.Column(sql.String(255), nullable=False)
+    # NOTE(jaypipes): Right now, using an adjacency list model for
+    #                 storing the hierarchy of regions is fine, since
+    #                 the API does not support any kind of querying for
+    #                 more complex hierarchical queries such as "get me only
+    #                 the regions that are subchildren of this region", etc.
+    #                 If, in the future, such queries are needed, then it
+    #                 would be possible to add in columns to this model for
+    #                 "left" and "right" and provide support for a nested set
+    #                 model.
+    parent_region_id = sql.Column(sql.String(255), nullable=True)
+
+    # TODO(jaypipes): I think it's absolutely stupid that every single model
+    #                 is required to have an "extra" column because of the
+    #                 DictBase in the keystone.common.sql.core module. Forcing
+    #                 tables to have pointless columns in the database is just
+    #                 bad. Remove all of this extra JSON blob stuff.
+    #                 See: https://bugs.launchpad.net/keystone/+bug/1265071
+    extra = sql.Column(sql.JsonBlob())
+    endpoints = sqlalchemy.orm.relationship("Endpoint", backref="region")
+
+
+class Service(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'service'
+    attributes = ['id', 'type', 'enabled']
+    id = sql.Column(sql.String(64), primary_key=True)
+    type = sql.Column(sql.String(255))
+    enabled = sql.Column(sql.Boolean, nullable=False, default=True,
+                         server_default=sqlalchemy.sql.expression.true())
+    extra = sql.Column(sql.JsonBlob())
+    endpoints = sqlalchemy.orm.relationship("Endpoint", backref="service")
+
+
+class Endpoint(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'endpoint'
+    attributes = ['id', 'interface', 'region_id', 'service_id', 'url',
+                  'legacy_endpoint_id', 'enabled']
+    id = sql.Column(sql.String(64), primary_key=True)
+    legacy_endpoint_id = sql.Column(sql.String(64))
+    interface = sql.Column(sql.String(8), nullable=False)
+    region_id = sql.Column(sql.String(255),
+                           sql.ForeignKey('region.id',
+                                          ondelete='RESTRICT'),
+                           nullable=True,
+                           default=None)
+    service_id = sql.Column(sql.String(64),
+                            sql.ForeignKey('service.id'),
+                            nullable=False)
+    url = sql.Column(sql.Text(), nullable=False)
+    enabled = sql.Column(sql.Boolean, nullable=False, default=True,
+                         server_default=sqlalchemy.sql.expression.true())
+    extra = sql.Column(sql.JsonBlob())
+
+
+class Catalog(catalog.Driver):
+    # Regions
+    def list_regions(self, hints):
+        session = sql.get_session()
+        regions = session.query(Region)
+        regions = sql.filter_limit_query(Region, regions, hints)
+        return [s.to_dict() for s in list(regions)]
+
+    def _get_region(self, session, region_id):
+        ref = session.query(Region).get(region_id)
+        if not ref:
+            raise exception.RegionNotFound(region_id=region_id)
+        return ref
+
+    def _delete_child_regions(self, session, region_id, root_region_id):
+        """Delete all child regions.
+
+        Recursively delete any region that has the supplied region
+        as its parent.
+        """
+        children = session.query(Region).filter_by(parent_region_id=region_id)
+        for child in children:
+            if child.id == root_region_id:
+                # Hit a circular region hierarchy
+                return
+            self._delete_child_regions(session, child.id, root_region_id)
+            session.delete(child)
+
+    def _check_parent_region(self, session, region_ref):
+        """Raise a NotFound if the parent region does not exist.
+
+        If the region_ref has a specified parent_region_id, check that
+        the parent exists, otherwise, raise a NotFound.
+        """
+        parent_region_id = region_ref.get('parent_region_id')
+        if parent_region_id is not None:
+            # This will raise NotFound if the parent doesn't exist,
+            # which is the behavior we want.
+            self._get_region(session, parent_region_id)
+
+    def _has_endpoints(self, session, region, root_region):
+        if region.endpoints is not None and len(region.endpoints) > 0:
+            return True
+
+        q = session.query(Region)
+        q = q.filter_by(parent_region_id=region.id)
+        for child in q.all():
+            if child.id == root_region.id:
+                # Hit a circular region hierarchy
+                return False
+            if self._has_endpoints(session, child, root_region):
+                return True
+        return False
+
+    def get_region(self, region_id):
+        session = sql.get_session()
+        return self._get_region(session, region_id).to_dict()
+
+    def delete_region(self, region_id):
+        session = sql.get_session()
+        with session.begin():
+            ref = self._get_region(session, region_id)
+            if self._has_endpoints(session, ref, ref):
+                raise exception.RegionDeletionError(region_id=region_id)
+            self._delete_child_regions(session, region_id, region_id)
+            session.delete(ref)
+
+    @sql.handle_conflicts(conflict_type='region')
+    def create_region(self, region_ref):
+        session = sql.get_session()
+        with session.begin():
+            self._check_parent_region(session, region_ref)
+            region = Region.from_dict(region_ref)
+            session.add(region)
+        return region.to_dict()
+
+    def update_region(self, region_id, region_ref):
+        session = sql.get_session()
+        with session.begin():
+            self._check_parent_region(session, region_ref)
+            ref = self._get_region(session, region_id)
+            old_dict = ref.to_dict()
+            old_dict.update(region_ref)
+            self._ensure_no_circle_in_hierarchical_regions(old_dict)
+            new_region = Region.from_dict(old_dict)
+            for attr in Region.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_region, attr))
+        return ref.to_dict()
+
+    # Services
+    @sql.truncated
+    def list_services(self, hints):
+        session = sql.get_session()
+        services = session.query(Service)
+        services = sql.filter_limit_query(Service, services, hints)
+        return [s.to_dict() for s in list(services)]
+
+    def _get_service(self, session, service_id):
+        ref = session.query(Service).get(service_id)
+        if not ref:
+            raise exception.ServiceNotFound(service_id=service_id)
+        return ref
+
+    def get_service(self, service_id):
+        session = sql.get_session()
+        return self._get_service(session, service_id).to_dict()
+
+    def delete_service(self, service_id):
+        session = sql.get_session()
+        with session.begin():
+            ref = self._get_service(session, service_id)
+            session.query(Endpoint).filter_by(service_id=service_id).delete()
+            session.delete(ref)
+
+    def create_service(self, service_id, service_ref):
+        session = sql.get_session()
+        with session.begin():
+            service = Service.from_dict(service_ref)
+            session.add(service)
+        return service.to_dict()
+
+    def update_service(self, service_id, service_ref):
+        session = sql.get_session()
+        with session.begin():
+            ref = self._get_service(session, service_id)
+            old_dict = ref.to_dict()
+            old_dict.update(service_ref)
+            new_service = Service.from_dict(old_dict)
+            for attr in Service.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_service, attr))
+            ref.extra = new_service.extra
+        return ref.to_dict()
+
+    # Endpoints
+    def create_endpoint(self, endpoint_id, endpoint_ref):
+        session = sql.get_session()
+        new_endpoint = Endpoint.from_dict(endpoint_ref)
+
+        with session.begin():
+            session.add(new_endpoint)
+        return new_endpoint.to_dict()
+
+    def delete_endpoint(self, endpoint_id):
+        session = sql.get_session()
+        with session.begin():
+            ref = self._get_endpoint(session, endpoint_id)
+            session.delete(ref)
+
+    def _get_endpoint(self, session, endpoint_id):
+        try:
+            return session.query(Endpoint).filter_by(id=endpoint_id).one()
+        except sql.NotFound:
+            raise exception.EndpointNotFound(endpoint_id=endpoint_id)
+
+    def get_endpoint(self, endpoint_id):
+        session = sql.get_session()
+        return self._get_endpoint(session, endpoint_id).to_dict()
+
+    @sql.truncated
+    def list_endpoints(self, hints):
+        session = sql.get_session()
+        endpoints = session.query(Endpoint)
+        endpoints = sql.filter_limit_query(Endpoint, endpoints, hints)
+        return [e.to_dict() for e in list(endpoints)]
+
+    def update_endpoint(self, endpoint_id, endpoint_ref):
+        session = sql.get_session()
+
+        with session.begin():
+            ref = self._get_endpoint(session, endpoint_id)
+            old_dict = ref.to_dict()
+            old_dict.update(endpoint_ref)
+            new_endpoint = Endpoint.from_dict(old_dict)
+            for attr in Endpoint.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_endpoint, attr))
+            ref.extra = new_endpoint.extra
+        return ref.to_dict()
+
+    def get_catalog(self, user_id, tenant_id):
+        substitutions = dict(
+            itertools.chain(six.iteritems(CONF),
+                            six.iteritems(CONF.eventlet_server)))
+        substitutions.update({'tenant_id': tenant_id, 'user_id': user_id})
+
+        session = sql.get_session()
+        endpoints = (session.query(Endpoint).
+                     options(sql.joinedload(Endpoint.service)).
+                     filter(Endpoint.enabled == true()).all())
+
+        catalog = {}
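+        # The v2 catalog built below looks like (illustrative):
+        #     {<region_id>: {<service_type>: {
+        #         'id': ..., 'name': ..., 'publicURL': ...,
+        #         'internalURL': ..., 'adminURL': ...}}}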
+
+        for endpoint in endpoints:
+            if not endpoint.service['enabled']:
+                continue
+            try:
+                url = core.format_url(endpoint['url'], substitutions)
+            except exception.MalformedEndpoint:
+                continue  # this failure is already logged in format_url()
+
+            region = endpoint['region_id']
+            service_type = endpoint.service['type']
+            default_service = {
+                'id': endpoint['id'],
+                'name': endpoint.service.extra.get('name', ''),
+                'publicURL': ''
+            }
+            catalog.setdefault(region, {})
+            catalog[region].setdefault(service_type, default_service)
+            interface_url = '%sURL' % endpoint['interface']
+            catalog[region][service_type][interface_url] = url
+
+        return catalog
+
+    def get_v3_catalog(self, user_id, tenant_id):
+        d = dict(
+            itertools.chain(six.iteritems(CONF),
+                            six.iteritems(CONF.eventlet_server)))
+        d.update({'tenant_id': tenant_id,
+                  'user_id': user_id})
+
+        session = sql.get_session()
+        services = (session.query(Service).filter(Service.enabled == true()).
+                    options(sql.joinedload(Service.endpoints)).
+                    all())
+
+        def make_v3_endpoints(endpoints):
+            for endpoint in (ep.to_dict() for ep in endpoints if ep.enabled):
+                del endpoint['service_id']
+                del endpoint['legacy_endpoint_id']
+                del endpoint['enabled']
+                endpoint['region'] = endpoint['region_id']
+                try:
+                    endpoint['url'] = core.format_url(endpoint['url'], d)
+                except exception.MalformedEndpoint:
+                    continue  # this failure is already logged in format_url()
+
+                yield endpoint
+
+        def make_v3_service(svc):
+            eps = list(make_v3_endpoints(svc.endpoints))
+            service = {'endpoints': eps, 'id': svc.id, 'type': svc.type}
+            service['name'] = svc.extra.get('name', '')
+            return service
+
+        return [make_v3_service(svc) for svc in services]
diff --git a/keystone-moon/keystone/catalog/backends/templated.py b/keystone-moon/keystone/catalog/backends/templated.py
new file mode 100644 (file)
index 0000000..d3ee105
--- /dev/null
@@ -0,0 +1,127 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+import os.path
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.catalog.backends import kvs
+from keystone.catalog import core
+from keystone import exception
+from keystone.i18n import _LC
+
+
+LOG = log.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+def parse_templates(template_lines):
+    o = {}
+    for line in template_lines:
+        if ' = ' not in line:
+            continue
+
+        k, v = line.strip().split(' = ')
+        if not k.startswith('catalog.'):
+            continue
+
+        parts = k.split('.')
+
+        region = parts[1]
+        # NOTE(termie): object-store insists on having a dash
+        service = parts[2].replace('_', '-')
+        key = parts[3]
+
+        region_ref = o.get(region, {})
+        service_ref = region_ref.get(service, {})
+        service_ref[key] = v
+
+        region_ref[service] = service_ref
+        o[region] = region_ref
+
+    return o
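+
+# For example (illustrative), the template line
+#     catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/
+# parses into {'RegionOne': {'identity': {'publicURL': '...'}}}.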
+
+
+class Catalog(kvs.Catalog):
+    """A backend that generates endpoints for the Catalog based on templates.
+
+    It is usually configured via config entries that look like:
+
+      catalog.$REGION.$SERVICE.$key = $value
+
+    and is stored in a similar-looking hierarchy, where a value can contain
+    values to be interpolated by standard Python string interpolation, which
+    look like this (the % is replaced by a $ because paste attempts to
+    interpolate on its own):
+
+      http://localhost:$(public_port)s/
+
+    When expanding the template, it will pass in a dict made up of the conf
+    instance plus a few additional key-values, notably tenant_id and user_id.
+
+    It does not care what the keys and values are, but it is worth noting that
+    keystone_compat will expect certain keys to be there so that it can munge
+    them into the output format keystone expects. These keys are:
+
+      name - the name of the service, most likely repeated for all services of
+             the same type, across regions.
+
+      adminURL - the url of the admin endpoint
+
+      publicURL - the url of the public endpoint
+
+      internalURL - the url of the internal endpoint
+
+    """
+
+    def __init__(self, templates=None):
+        super(Catalog, self).__init__()
+        if templates:
+            self.templates = templates
+        else:
+            template_file = CONF.catalog.template_file
+            if not os.path.exists(template_file):
+                template_file = CONF.find_file(template_file)
+            self._load_templates(template_file)
+
+    def _load_templates(self, template_file):
+        try:
+            self.templates = parse_templates(open(template_file))
+        except IOError:
+            LOG.critical(_LC('Unable to open template file %s'), template_file)
+            raise
+
+    def get_catalog(self, user_id, tenant_id):
+        substitutions = dict(
+            itertools.chain(six.iteritems(CONF),
+                            six.iteritems(CONF.eventlet_server)))
+        substitutions.update({'tenant_id': tenant_id, 'user_id': user_id})
+
+        catalog = {}
+        for region, region_ref in six.iteritems(self.templates):
+            catalog[region] = {}
+            for service, service_ref in six.iteritems(region_ref):
+                service_data = {}
+                try:
+                    for k, v in six.iteritems(service_ref):
+                        service_data[k] = core.format_url(v, substitutions)
+                except exception.MalformedEndpoint:
+                    continue  # this failure is already logged in format_url()
+                catalog[region][service] = service_data
+
+        return catalog
diff --git a/keystone-moon/keystone/catalog/controllers.py b/keystone-moon/keystone/catalog/controllers.py
new file mode 100644 (file)
index 0000000..3518c4b
--- /dev/null
@@ -0,0 +1,336 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2012 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import six
+
+from keystone.catalog import schema
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import validation
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _
+from keystone import notifications
+
+
+INTERFACES = ['public', 'internal', 'admin']
+
+
+@dependency.requires('catalog_api')
+class Service(controller.V2Controller):
+
+    @controller.v2_deprecated
+    def get_services(self, context):
+        self.assert_admin(context)
+        service_list = self.catalog_api.list_services()
+        return {'OS-KSADM:services': service_list}
+
+    @controller.v2_deprecated
+    def get_service(self, context, service_id):
+        self.assert_admin(context)
+        service_ref = self.catalog_api.get_service(service_id)
+        return {'OS-KSADM:service': service_ref}
+
+    @controller.v2_deprecated
+    def delete_service(self, context, service_id):
+        self.assert_admin(context)
+        self.catalog_api.delete_service(service_id)
+
+    @controller.v2_deprecated
+    def create_service(self, context, OS_KSADM_service):
+        self.assert_admin(context)
+        service_id = uuid.uuid4().hex
+        service_ref = OS_KSADM_service.copy()
+        service_ref['id'] = service_id
+        new_service_ref = self.catalog_api.create_service(
+            service_id, service_ref)
+        return {'OS-KSADM:service': new_service_ref}
+
+
+@dependency.requires('catalog_api')
+class Endpoint(controller.V2Controller):
+
+    @controller.v2_deprecated
+    def get_endpoints(self, context):
+        """Merge matching v3 endpoint refs into legacy refs."""
+        self.assert_admin(context)
+        legacy_endpoints = {}
+        for endpoint in self.catalog_api.list_endpoints():
+            if not endpoint.get('legacy_endpoint_id'):
+                # endpoints created in v3 should not appear on the v2 API
+                continue
+
+            # is this a legacy endpoint we haven't indexed yet?
+            if endpoint['legacy_endpoint_id'] not in legacy_endpoints:
+                legacy_ep = endpoint.copy()
+                legacy_ep['id'] = legacy_ep.pop('legacy_endpoint_id')
+                legacy_ep.pop('interface')
+                legacy_ep.pop('url')
+                legacy_ep['region'] = legacy_ep.pop('region_id')
+
+                legacy_endpoints[endpoint['legacy_endpoint_id']] = legacy_ep
+            else:
+                legacy_ep = legacy_endpoints[endpoint['legacy_endpoint_id']]
+
+            # add the legacy endpoint with an interface url
+            legacy_ep['%surl' % endpoint['interface']] = endpoint['url']
+        return {'endpoints': legacy_endpoints.values()}
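+
+    # A hedged sketch (ids illustrative) of the merge above: two v3 refs
+    # sharing legacy_endpoint_id 'lep1', one with interface 'public' and
+    # url 'P', the other 'admin' and 'A', collapse into one v2 ref:
+    #
+    #   {'id': 'lep1', 'region': 'RegionOne', 'publicurl': 'P',
+    #    'adminurl': 'A', ...}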
+
+    @controller.v2_deprecated
+    def create_endpoint(self, context, endpoint):
+        """Create three v3 endpoint refs based on a legacy ref."""
+        self.assert_admin(context)
+
+        # according to the v2 spec publicurl is mandatory
+        self._require_attribute(endpoint, 'publicurl')
+        # service_id is necessary
+        self._require_attribute(endpoint, 'service_id')
+
+        initiator = notifications._get_request_audit_info(context)
+
+        if endpoint.get('region') is not None:
+            try:
+                self.catalog_api.get_region(endpoint['region'])
+            except exception.RegionNotFound:
+                region = dict(id=endpoint['region'])
+                self.catalog_api.create_region(region, initiator)
+
+        legacy_endpoint_ref = endpoint.copy()
+
+        urls = {}
+        for i in INTERFACES:
+            # remove all urls so they aren't persisted more than once
+            url = '%surl' % i
+            if endpoint.get(url):
+                # valid urls need to be persisted
+                urls[i] = endpoint.pop(url)
+            elif url in endpoint:
+                # null or empty urls can be discarded
+                endpoint.pop(url)
+                legacy_endpoint_ref.pop(url)
+
+        legacy_endpoint_id = uuid.uuid4().hex
+        for interface, url in six.iteritems(urls):
+            endpoint_ref = endpoint.copy()
+            endpoint_ref['id'] = uuid.uuid4().hex
+            endpoint_ref['legacy_endpoint_id'] = legacy_endpoint_id
+            endpoint_ref['interface'] = interface
+            endpoint_ref['url'] = url
+            endpoint_ref['region_id'] = endpoint_ref.pop('region')
+            self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref,
+                                             initiator)
+
+        legacy_endpoint_ref['id'] = legacy_endpoint_id
+        return {'endpoint': legacy_endpoint_ref}
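+
+    # The inverse of get_endpoints() above, sketched with illustrative
+    # values: a legacy ref {'service_id': 's1', 'region': 'RegionOne',
+    # 'publicurl': 'P', 'adminurl': 'A'} is persisted as two v3 refs,
+    # each with its own generated id but a shared legacy_endpoint_id:
+    #
+    #   {'id': <uuid>, 'legacy_endpoint_id': <shared uuid>,
+    #    'interface': 'public', 'url': 'P', 'region_id': 'RegionOne',
+    #    'service_id': 's1'}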
+
+    @controller.v2_deprecated
+    def delete_endpoint(self, context, endpoint_id):
+        """Delete up to three v3 endpoint refs based on a legacy ref ID."""
+        self.assert_admin(context)
+
+        deleted_at_least_one = False
+        for endpoint in self.catalog_api.list_endpoints():
+            if endpoint['legacy_endpoint_id'] == endpoint_id:
+                self.catalog_api.delete_endpoint(endpoint['id'])
+                deleted_at_least_one = True
+
+        if not deleted_at_least_one:
+            raise exception.EndpointNotFound(endpoint_id=endpoint_id)
+
+
+@dependency.requires('catalog_api')
+class RegionV3(controller.V3Controller):
+    collection_name = 'regions'
+    member_name = 'region'
+
+    def create_region_with_id(self, context, region_id, region):
+        """Create a region with a user-specified ID.
+
+        This method is unprotected because it depends on ``self.create_region``
+        to enforce policy.
+        """
+        if 'id' in region and region_id != region['id']:
+            raise exception.ValidationError(
+                _('Conflicting region IDs specified: '
+                  '"%(url_id)s" != "%(ref_id)s"') % {
+                      'url_id': region_id,
+                      'ref_id': region['id']})
+        region['id'] = region_id
+        return self.create_region(context, region)
+
+    @controller.protected()
+    @validation.validated(schema.region_create, 'region')
+    def create_region(self, context, region):
+        ref = self._normalize_dict(region)
+
+        if not ref.get('id'):
+            ref = self._assign_unique_id(ref)
+
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.catalog_api.create_region(ref, initiator)
+        return wsgi.render_response(
+            RegionV3.wrap_member(context, ref),
+            status=(201, 'Created'))
+
+    @controller.filterprotected('parent_region_id')
+    def list_regions(self, context, filters):
+        hints = RegionV3.build_driver_hints(context, filters)
+        refs = self.catalog_api.list_regions(hints)
+        return RegionV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.protected()
+    def get_region(self, context, region_id):
+        ref = self.catalog_api.get_region(region_id)
+        return RegionV3.wrap_member(context, ref)
+
+    @controller.protected()
+    @validation.validated(schema.region_update, 'region')
+    def update_region(self, context, region_id, region):
+        self._require_matching_id(region_id, region)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.catalog_api.update_region(region_id, region, initiator)
+        return RegionV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_region(self, context, region_id):
+        initiator = notifications._get_request_audit_info(context)
+        return self.catalog_api.delete_region(region_id, initiator)
+
+
+@dependency.requires('catalog_api')
+class ServiceV3(controller.V3Controller):
+    collection_name = 'services'
+    member_name = 'service'
+
+    def __init__(self):
+        super(ServiceV3, self).__init__()
+        self.get_member_from_driver = self.catalog_api.get_service
+
+    @controller.protected()
+    @validation.validated(schema.service_create, 'service')
+    def create_service(self, context, service):
+        ref = self._assign_unique_id(self._normalize_dict(service))
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.catalog_api.create_service(ref['id'], ref, initiator)
+        return ServiceV3.wrap_member(context, ref)
+
+    @controller.filterprotected('type', 'name')
+    def list_services(self, context, filters):
+        hints = ServiceV3.build_driver_hints(context, filters)
+        refs = self.catalog_api.list_services(hints=hints)
+        return ServiceV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.protected()
+    def get_service(self, context, service_id):
+        ref = self.catalog_api.get_service(service_id)
+        return ServiceV3.wrap_member(context, ref)
+
+    @controller.protected()
+    @validation.validated(schema.service_update, 'service')
+    def update_service(self, context, service_id, service):
+        self._require_matching_id(service_id, service)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.catalog_api.update_service(service_id, service, initiator)
+        return ServiceV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_service(self, context, service_id):
+        initiator = notifications._get_request_audit_info(context)
+        return self.catalog_api.delete_service(service_id, initiator)
+
+
+@dependency.requires('catalog_api')
+class EndpointV3(controller.V3Controller):
+    collection_name = 'endpoints'
+    member_name = 'endpoint'
+
+    def __init__(self):
+        super(EndpointV3, self).__init__()
+        self.get_member_from_driver = self.catalog_api.get_endpoint
+
+    @classmethod
+    def filter_endpoint(cls, ref):
+        if 'legacy_endpoint_id' in ref:
+            ref.pop('legacy_endpoint_id')
+        ref['region'] = ref['region_id']
+        return ref
+
+    @classmethod
+    def wrap_member(cls, context, ref):
+        ref = cls.filter_endpoint(ref)
+        return super(EndpointV3, cls).wrap_member(context, ref)
+
+    def _validate_endpoint_region(self, endpoint, context=None):
+        """Ensure the region for the endpoint exists.
+
+        If 'region_id' is used to specify the region, then we will let the
+        manager/driver take care of this.  If, however, 'region' is used,
+        then for backward compatibility, we will auto-create the region.
+
+        """
+        if (endpoint.get('region_id') is None and
+                endpoint.get('region') is not None):
+            # To maintain backward compatibility with clients that are
+            # using the v3 API in the same way as they used the v2 API,
+            # create the endpoint region, if that region does not exist
+            # in keystone.
+            endpoint['region_id'] = endpoint.pop('region')
+            try:
+                self.catalog_api.get_region(endpoint['region_id'])
+            except exception.RegionNotFound:
+                region = dict(id=endpoint['region_id'])
+                initiator = notifications._get_request_audit_info(context)
+                self.catalog_api.create_region(region, initiator)
+
+        return endpoint
+
+    @controller.protected()
+    @validation.validated(schema.endpoint_create, 'endpoint')
+    def create_endpoint(self, context, endpoint):
+        ref = self._assign_unique_id(self._normalize_dict(endpoint))
+        ref = self._validate_endpoint_region(ref, context)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.catalog_api.create_endpoint(ref['id'], ref, initiator)
+        return EndpointV3.wrap_member(context, ref)
+
+    @controller.filterprotected('interface', 'service_id')
+    def list_endpoints(self, context, filters):
+        hints = EndpointV3.build_driver_hints(context, filters)
+        refs = self.catalog_api.list_endpoints(hints=hints)
+        return EndpointV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.protected()
+    def get_endpoint(self, context, endpoint_id):
+        ref = self.catalog_api.get_endpoint(endpoint_id)
+        return EndpointV3.wrap_member(context, ref)
+
+    @controller.protected()
+    @validation.validated(schema.endpoint_update, 'endpoint')
+    def update_endpoint(self, context, endpoint_id, endpoint):
+        self._require_matching_id(endpoint_id, endpoint)
+
+        endpoint = self._validate_endpoint_region(endpoint.copy(), context)
+
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.catalog_api.update_endpoint(endpoint_id, endpoint,
+                                               initiator)
+        return EndpointV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_endpoint(self, context, endpoint_id):
+        initiator = notifications._get_request_audit_info(context)
+        return self.catalog_api.delete_endpoint(endpoint_id, initiator)
diff --git a/keystone-moon/keystone/catalog/core.py b/keystone-moon/keystone/catalog/core.py
new file mode 100644 (file)
index 0000000..fba26b8
--- /dev/null
@@ -0,0 +1,506 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2012 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Catalog service."""
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import driver_hints
+from keystone.common import manager
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone.i18n import _LE
+from keystone import notifications
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+MEMOIZE = cache.get_memoization_decorator(section='catalog')
+
+
+def format_url(url, substitutions):
+    """Formats a user-defined URL with the given substitutions.
+
+    :param string url: the URL to be formatted
+    :param dict substitutions: the dictionary used for substitution
+    :returns: a formatted URL
+
+    """
+
+    WHITELISTED_PROPERTIES = [
+        'tenant_id', 'user_id', 'public_bind_host', 'admin_bind_host',
+        'compute_host', 'compute_port', 'admin_port', 'public_port',
+        'public_endpoint', 'admin_endpoint', ]
+
+    substitutions = utils.WhiteListedItemFilter(
+        WHITELISTED_PROPERTIES,
+        substitutions)
+    try:
+        result = url.replace('$(', '%(') % substitutions
+    except AttributeError:
+        LOG.error(_LE('Malformed endpoint - %(url)r is not a string'),
+                  {"url": url})
+        raise exception.MalformedEndpoint(endpoint=url)
+    except KeyError as e:
+        LOG.error(_LE("Malformed endpoint %(url)s - unknown key %(keyerror)s"),
+                  {"url": url,
+                   "keyerror": e})
+        raise exception.MalformedEndpoint(endpoint=url)
+    except TypeError as e:
+        LOG.error(_LE("Malformed endpoint '%(url)s'. The following type error "
+                      "occurred during string substitution: %(typeerror)s"),
+                  {"url": url,
+                   "typeerror": e})
+        raise exception.MalformedEndpoint(endpoint=url)
+    except ValueError:
+        LOG.error(_LE("Malformed endpoint %s - incomplete format "
+                      "(are you missing a type notifier?)"), url)
+        raise exception.MalformedEndpoint(endpoint=url)
+    return result
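+
+# A hedged doctest-style sketch of format_url() (the port value is
+# illustrative; only keys in WHITELISTED_PROPERTIES are substituted):
+#
+#   >>> format_url('http://localhost:$(public_port)s/',
+#   ...            {'public_port': 5000})
+#   'http://localhost:5000/'
+#
+# A key missing from substitutions (or outside the whitelist) raises
+# MalformedEndpoint after logging the error.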
+
+
+@dependency.provider('catalog_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Catalog backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+    _ENDPOINT = 'endpoint'
+    _SERVICE = 'service'
+    _REGION = 'region'
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.catalog.driver)
+
+    def create_region(self, region_ref, initiator=None):
+        # Check duplicate ID
+        try:
+            self.get_region(region_ref['id'])
+        except exception.RegionNotFound:
+            pass
+        else:
+            msg = _('Duplicate ID, %s.') % region_ref['id']
+            raise exception.Conflict(type='region', details=msg)
+
+        # NOTE(lbragstad): The description column of the region database
+        # can not be null. So if the user doesn't pass in a description then
+        # set it to an empty string.
+        region_ref.setdefault('description', '')
+        try:
+            ret = self.driver.create_region(region_ref)
+        except exception.NotFound:
+            parent_region_id = region_ref.get('parent_region_id')
+            raise exception.RegionNotFound(region_id=parent_region_id)
+
+        notifications.Audit.created(self._REGION, ret['id'], initiator)
+        return ret
+
+    @MEMOIZE
+    def get_region(self, region_id):
+        try:
+            return self.driver.get_region(region_id)
+        except exception.NotFound:
+            raise exception.RegionNotFound(region_id=region_id)
+
+    def update_region(self, region_id, region_ref, initiator=None):
+        ref = self.driver.update_region(region_id, region_ref)
+        notifications.Audit.updated(self._REGION, region_id, initiator)
+        self.get_region.invalidate(self, region_id)
+        return ref
+
+    def delete_region(self, region_id, initiator=None):
+        try:
+            ret = self.driver.delete_region(region_id)
+            notifications.Audit.deleted(self._REGION, region_id, initiator)
+            self.get_region.invalidate(self, region_id)
+            return ret
+        except exception.NotFound:
+            raise exception.RegionNotFound(region_id=region_id)
+
+    @manager.response_truncated
+    def list_regions(self, hints=None):
+        return self.driver.list_regions(hints or driver_hints.Hints())
+
+    def create_service(self, service_id, service_ref, initiator=None):
+        service_ref.setdefault('enabled', True)
+        service_ref.setdefault('name', '')
+        ref = self.driver.create_service(service_id, service_ref)
+        notifications.Audit.created(self._SERVICE, service_id, initiator)
+        return ref
+
+    @MEMOIZE
+    def get_service(self, service_id):
+        try:
+            return self.driver.get_service(service_id)
+        except exception.NotFound:
+            raise exception.ServiceNotFound(service_id=service_id)
+
+    def update_service(self, service_id, service_ref, initiator=None):
+        ref = self.driver.update_service(service_id, service_ref)
+        notifications.Audit.updated(self._SERVICE, service_id, initiator)
+        self.get_service.invalidate(self, service_id)
+        return ref
+
+    def delete_service(self, service_id, initiator=None):
+        try:
+            endpoints = self.list_endpoints()
+            ret = self.driver.delete_service(service_id)
+            notifications.Audit.deleted(self._SERVICE, service_id, initiator)
+            self.get_service.invalidate(self, service_id)
+            for endpoint in endpoints:
+                if endpoint['service_id'] == service_id:
+                    self.get_endpoint.invalidate(self, endpoint['id'])
+            return ret
+        except exception.NotFound:
+            raise exception.ServiceNotFound(service_id=service_id)
+
+    @manager.response_truncated
+    def list_services(self, hints=None):
+        return self.driver.list_services(hints or driver_hints.Hints())
+
+    def _assert_region_exists(self, region_id):
+        try:
+            if region_id is not None:
+                self.get_region(region_id)
+        except exception.RegionNotFound:
+            raise exception.ValidationError(attribute='endpoint region_id',
+                                            target='region table')
+
+    def _assert_service_exists(self, service_id):
+        try:
+            if service_id is not None:
+                self.get_service(service_id)
+        except exception.ServiceNotFound:
+            raise exception.ValidationError(attribute='endpoint service_id',
+                                            target='service table')
+
+    def create_endpoint(self, endpoint_id, endpoint_ref, initiator=None):
+        self._assert_region_exists(endpoint_ref.get('region_id'))
+        self._assert_service_exists(endpoint_ref['service_id'])
+        ref = self.driver.create_endpoint(endpoint_id, endpoint_ref)
+
+        notifications.Audit.created(self._ENDPOINT, endpoint_id, initiator)
+        return ref
+
+    def update_endpoint(self, endpoint_id, endpoint_ref, initiator=None):
+        self._assert_region_exists(endpoint_ref.get('region_id'))
+        self._assert_service_exists(endpoint_ref.get('service_id'))
+        ref = self.driver.update_endpoint(endpoint_id, endpoint_ref)
+        notifications.Audit.updated(self._ENDPOINT, endpoint_id, initiator)
+        self.get_endpoint.invalidate(self, endpoint_id)
+        return ref
+
+    def delete_endpoint(self, endpoint_id, initiator=None):
+        try:
+            ret = self.driver.delete_endpoint(endpoint_id)
+            notifications.Audit.deleted(self._ENDPOINT, endpoint_id, initiator)
+            self.get_endpoint.invalidate(self, endpoint_id)
+            return ret
+        except exception.NotFound:
+            raise exception.EndpointNotFound(endpoint_id=endpoint_id)
+
+    @MEMOIZE
+    def get_endpoint(self, endpoint_id):
+        try:
+            return self.driver.get_endpoint(endpoint_id)
+        except exception.NotFound:
+            raise exception.EndpointNotFound(endpoint_id=endpoint_id)
+
+    @manager.response_truncated
+    def list_endpoints(self, hints=None):
+        return self.driver.list_endpoints(hints or driver_hints.Hints())
+
+    def get_catalog(self, user_id, tenant_id):
+        try:
+            return self.driver.get_catalog(user_id, tenant_id)
+        except exception.NotFound:
+            raise exception.NotFound('Catalog not found for user and tenant')
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    """Interface description for an Catalog driver."""
+
+    def _get_list_limit(self):
+        return CONF.catalog.list_limit or CONF.list_limit
+
+    def _ensure_no_circle_in_hierarchical_regions(self, region_ref):
+        if region_ref.get('parent_region_id') is None:
+            return
+
+        root_region_id = region_ref['id']
+        parent_region_id = region_ref['parent_region_id']
+
+        while parent_region_id:
+            # NOTE(wanghong): check before getting parent region can ensure no
+            # self circle
+            if parent_region_id == root_region_id:
+                raise exception.CircularRegionHierarchyError(
+                    parent_region_id=parent_region_id)
+            parent_region = self.get_region(parent_region_id)
+            parent_region_id = parent_region.get('parent_region_id')
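+
+    # A hedged sketch (ids illustrative) of the walk above: if region 'A'
+    # already has parent 'B', then updating 'B' with parent_region_id 'A'
+    # walks A -> B, hits parent_region_id == root_region_id ('B'), and
+    # raises CircularRegionHierarchyError instead of looping forever.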
+
+    @abc.abstractmethod
+    def create_region(self, region_ref):
+        """Creates a new region.
+
+        :raises: keystone.exception.Conflict
+        :raises: keystone.exception.RegionNotFound (if parent region invalid)
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_regions(self, hints):
+        """List all regions.
+
+        :param hints: contains the list of filters yet to be satisfied.
+                      Any filters satisfied here will be removed so that
+                      the caller will know if any filters remain.
+
+        :returns: list of region_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_region(self, region_id):
+        """Get region by id.
+
+        :returns: region_ref dict
+        :raises: keystone.exception.RegionNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_region(self, region_id, region_ref):
+        """Update region by id.
+
+        :returns: region_ref dict
+        :raises: keystone.exception.RegionNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_region(self, region_id):
+        """Deletes an existing region.
+
+        :raises: keystone.exception.RegionNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_service(self, service_id, service_ref):
+        """Creates a new service.
+
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_services(self, hints):
+        """List all services.
+
+        :param hints: contains the list of filters yet to be satisfied.
+                      Any filters satisfied here will be removed so that
+                      the caller will know if any filters remain.
+
+        :returns: list of service_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_service(self, service_id):
+        """Get service by id.
+
+        :returns: service_ref dict
+        :raises: keystone.exception.ServiceNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_service(self, service_id, service_ref):
+        """Update service by id.
+
+        :returns: service_ref dict
+        :raises: keystone.exception.ServiceNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_service(self, service_id):
+        """Deletes an existing service.
+
+        :raises: keystone.exception.ServiceNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_endpoint(self, endpoint_id, endpoint_ref):
+        """Creates a new endpoint for a service.
+
+        :raises: keystone.exception.Conflict,
+                 keystone.exception.ServiceNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_endpoint(self, endpoint_id):
+        """Get endpoint by id.
+
+        :returns: endpoint_ref dict
+        :raises: keystone.exception.EndpointNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_endpoints(self, hints):
+        """List all endpoints.
+
+        :param hints: contains the list of filters yet to be satisfied.
+                      Any filters satisfied here will be removed so that
+                      the caller will know if any filters remain.
+
+        :returns: list of endpoint_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_endpoint(self, endpoint_id, endpoint_ref):
+        """Get endpoint by id.
+
+        :returns: endpoint_ref dict
+        :raises: keystone.exception.EndpointNotFound
+                 keystone.exception.ServiceNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_endpoint(self, endpoint_id):
+        """Deletes an endpoint for a service.
+
+        :raises: keystone.exception.EndpointNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_catalog(self, user_id, tenant_id):
+        """Retrieve and format the current service catalog.
+
+        Example::
+
+            { 'RegionOne':
+                {'compute': {
+                    'adminURL': u'http://host:8774/v1.1/tenantid',
+                    'internalURL': u'http://host:8774/v1.1/tenant_id',
+                    'name': 'Compute Service',
+                    'publicURL': u'http://host:8774/v1.1/tenantid'},
+                 'ec2': {
+                    'adminURL': 'http://host:8773/services/Admin',
+                    'internalURL': 'http://host:8773/services/Cloud',
+                    'name': 'EC2 Service',
+                    'publicURL': 'http://host:8773/services/Cloud'}}
+
+        :returns: A nested dict representing the service catalog or an
+                  empty dict.
+        :raises: keystone.exception.NotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_v3_catalog(self, user_id, tenant_id):
+        """Retrieve and format the current V3 service catalog.
+
+        The default implementation builds the V3 catalog from the V2 catalog.
+
+        Example::
+
+            [
+                {
+                    "endpoints": [
+                    {
+                        "interface": "public",
+                        "id": "--endpoint-id--",
+                        "region": "RegionOne",
+                        "url": "http://external:8776/v1/--project-id--"
+                    },
+                    {
+                        "interface": "internal",
+                        "id": "--endpoint-id--",
+                        "region": "RegionOne",
+                        "url": "http://internal:8776/v1/--project-id--"
+                    }],
+                "id": "--service-id--",
+                "type": "volume"
+            }]
+
+        :returns: A list representing the service catalog or an empty list
+        :raises: keystone.exception.NotFound
+
+        """
+        v2_catalog = self.get_catalog(user_id, tenant_id)
+        v3_catalog = []
+
+        for region_name, region in six.iteritems(v2_catalog):
+            for service_type, service in six.iteritems(region):
+                service_v3 = {
+                    'type': service_type,
+                    'endpoints': []
+                }
+
+                for attr, value in six.iteritems(service):
+                    # Attributes that end in URL are interfaces. In the V2
+                    # catalog, these are internalURL, publicURL, and adminURL.
+                    # For example, <region_name>.publicURL=<URL> in the V2
+                    # catalog becomes the V3 interface for the service:
+                    # { 'interface': 'public', 'url': '<URL>',
+                    #   'region': '<region_name>' }
+                    if attr.endswith('URL'):
+                        v3_interface = attr[:-len('URL')]
+                        service_v3['endpoints'].append({
+                            'interface': v3_interface,
+                            'region': region_name,
+                            'url': value,
+                        })
+                        continue
+
+                    # Other attributes are copied to the service.
+                    service_v3[attr] = value
+
+                v3_catalog.append(service_v3)
+
+        return v3_catalog
diff --git a/keystone-moon/keystone/catalog/routers.py b/keystone-moon/keystone/catalog/routers.py
new file mode 100644 (file)
index 0000000..f3bd988
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.catalog import controllers
+from keystone.common import router
+from keystone.common import wsgi
+
+
+class Routers(wsgi.RoutersBase):
+
+    def append_v3_routers(self, mapper, routers):
+        regions_controller = controllers.RegionV3()
+        routers.append(router.Router(regions_controller,
+                                     'regions', 'region',
+                                     resource_descriptions=self.v3_resources))
+
+        # Need to add an additional route to support PUT /regions/{region_id}
+        mapper.connect(
+            '/regions/{region_id}',
+            controller=regions_controller,
+            action='create_region_with_id',
+            conditions=dict(method=['PUT']))
+
+        routers.append(router.Router(controllers.ServiceV3(),
+                                     'services', 'service',
+                                     resource_descriptions=self.v3_resources))
+        routers.append(router.Router(controllers.EndpointV3(),
+                                     'endpoints', 'endpoint',
+                                     resource_descriptions=self.v3_resources))
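+
+# A hedged usage sketch for the extra PUT route above (path and payload
+# are illustrative):
+#
+#   PUT /v3/regions/RegionOne
+#   {"region": {"description": "Primary region"}}
+#
+# dispatches to RegionV3.create_region_with_id(), letting the client pick
+# the region ID rather than receive a generated one.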
diff --git a/keystone-moon/keystone/catalog/schema.py b/keystone-moon/keystone/catalog/schema.py
new file mode 100644 (file)
index 0000000..a779ad0
--- /dev/null
@@ -0,0 +1,96 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.validation import parameter_types
+
+
+_region_properties = {
+    'description': parameter_types.description,
+    # NOTE(lbragstad): Regions use ID differently. The user can specify the ID
+    # or it will be generated automatically.
+    'id': {
+        'type': 'string'
+    },
+    'parent_region_id': {
+        'type': ['string', 'null']
+    }
+}
+
+region_create = {
+    'type': 'object',
+    'properties': _region_properties,
+    'additionalProperties': True
+    # NOTE(lbragstad): No parameters are required for creating regions.
+}
+
+region_update = {
+    'type': 'object',
+    'properties': _region_properties,
+    'minProperties': 1,
+    'additionalProperties': True
+}
+
+_service_properties = {
+    'enabled': parameter_types.boolean,
+    'name': parameter_types.name,
+    'type': {
+        'type': 'string',
+        'minLength': 1,
+        'maxLength': 255
+    }
+}
+
+service_create = {
+    'type': 'object',
+    'properties': _service_properties,
+    'required': ['type'],
+    'additionalProperties': True,
+}
+
+service_update = {
+    'type': 'object',
+    'properties': _service_properties,
+    'minProperties': 1,
+    'additionalProperties': True
+}
+
+_endpoint_properties = {
+    'enabled': parameter_types.boolean,
+    'interface': {
+        'type': 'string',
+        'enum': ['admin', 'internal', 'public']
+    },
+    'region_id': {
+        'type': 'string'
+    },
+    'region': {
+        'type': 'string'
+    },
+    'service_id': {
+        'type': 'string'
+    },
+    'url': parameter_types.url
+}
+
+endpoint_create = {
+    'type': 'object',
+    'properties': _endpoint_properties,
+    'required': ['interface', 'service_id', 'url'],
+    'additionalProperties': True
+}
+
+endpoint_update = {
+    'type': 'object',
+    'properties': _endpoint_properties,
+    'minProperties': 1,
+    'additionalProperties': True
+}
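+
+# A hedged sketch of validation against endpoint_create above (values are
+# illustrative): this payload passes
+#
+#   {'interface': 'public', 'service_id': 's1',
+#    'url': 'http://example.com:5000/'}
+#
+# while omitting a required property, or using an interface outside
+# {'admin', 'internal', 'public'}, fails; unknown extra properties are
+# accepted because additionalProperties is True.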
diff --git a/keystone-moon/keystone/clean.py b/keystone-moon/keystone/clean.py
new file mode 100644 (file)
index 0000000..38564e0
--- /dev/null
@@ -0,0 +1,87 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from keystone import exception
+from keystone.i18n import _
+
+
+def check_length(property_name, value, min_length=1, max_length=64):
+    if len(value) < min_length:
+        if min_length == 1:
+            msg = _("%s cannot be empty.") % property_name
+        else:
+            msg = (_("%(property_name)s cannot be less than "
+                   "%(min_length)s characters.") % dict(
+                       property_name=property_name, min_length=min_length))
+        raise exception.ValidationError(msg)
+    if len(value) > max_length:
+        msg = (_("%(property_name)s should not be greater than "
+               "%(max_length)s characters.") % dict(
+                   property_name=property_name, max_length=max_length))
+
+        raise exception.ValidationError(msg)
+
+
+def check_type(property_name, value, expected_type, display_expected_type):
+    if not isinstance(value, expected_type):
+        msg = (_("%(property_name)s is not a "
+                 "%(display_expected_type)s") % dict(
+                     property_name=property_name,
+                     display_expected_type=display_expected_type))
+        raise exception.ValidationError(msg)
+
+
+def check_enabled(property_name, enabled):
+    # Allow int and its subclass bool
+    check_type('%s enabled' % property_name, enabled, int, 'boolean')
+    return bool(enabled)
+
+
+def check_name(property_name, name, min_length=1, max_length=64):
+    check_type('%s name' % property_name, name, six.string_types,
+               'str or unicode')
+    name = name.strip()
+    check_length('%s name' % property_name, name,
+                 min_length=min_length, max_length=max_length)
+    return name
+
+
+def domain_name(name):
+    return check_name('Domain', name)
+
+
+def domain_enabled(enabled):
+    return check_enabled('Domain', enabled)
+
+
+def project_name(name):
+    return check_name('Project', name)
+
+
+def project_enabled(enabled):
+    return check_enabled('Project', enabled)
+
+
+def user_name(name):
+    return check_name('User', name, max_length=255)
+
+
+def user_enabled(enabled):
+    return check_enabled('User', enabled)
+
+
+def group_name(name):
+    return check_name('Group', name)
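+
+# A hedged doctest-style sketch of the helpers above:
+#
+#   >>> user_name('  alice  ')
+#   'alice'
+#   >>> domain_enabled(1)
+#   True
+#
+# An empty name, or one beyond the length limit, raises ValidationError
+# via check_length().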
diff --git a/keystone-moon/keystone/cli.py b/keystone-moon/keystone/cli.py
new file mode 100644 (file)
index 0000000..b5fff13
--- /dev/null
@@ -0,0 +1,596 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+
+from oslo_config import cfg
+from oslo_log import log
+import pbr.version
+
+from keystone import assignment
+from keystone.common import driver_hints
+from keystone.common import openssl
+from keystone.common import sql
+from keystone.common.sql import migration_helpers
+from keystone.common import utils
+from keystone import config
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone import identity
+from keystone import resource
+from keystone import token
+from keystone.token.providers.fernet import utils as fernet
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class BaseApp(object):
+
+    name = None
+
+    @classmethod
+    def add_argument_parser(cls, subparsers):
+        parser = subparsers.add_parser(cls.name, help=cls.__doc__)
+        parser.set_defaults(cmd_class=cls)
+        return parser
+
+
+class DbSync(BaseApp):
+    """Sync the database."""
+
+    name = 'db_sync'
+
+    @classmethod
+    def add_argument_parser(cls, subparsers):
+        parser = super(DbSync, cls).add_argument_parser(subparsers)
+        parser.add_argument('version', default=None, nargs='?',
+                            help=('Migrate the database up to a specified '
+                                  'version. If not provided, db_sync will '
+                                  'migrate the database to the latest known '
+                                  'version.'))
+        parser.add_argument('--extension', default=None,
+                            help=('Migrate the database for the specified '
+                                  'extension. If not provided, db_sync will '
+                                  'migrate the common repository.'))
+
+        return parser
+
+    @staticmethod
+    def main():
+        version = CONF.command.version
+        extension = CONF.command.extension
+        migration_helpers.sync_database_to_version(extension, version)
+
+
+class DbVersion(BaseApp):
+    """Print the current migration version of the database."""
+
+    name = 'db_version'
+
+    @classmethod
+    def add_argument_parser(cls, subparsers):
+        parser = super(DbVersion, cls).add_argument_parser(subparsers)
+        parser.add_argument('--extension', default=None,
+                            help=('Print the migration version of the '
+                                  'database for the specified extension. If '
+                                  'not provided, print it for the common '
+                                  'repository.'))
+
+    @staticmethod
+    def main():
+        extension = CONF.command.extension
+        migration_helpers.print_db_version(extension)
+
+
+class BasePermissionsSetup(BaseApp):
+    """Common user/group setup for file permissions."""
+
+    @classmethod
+    def add_argument_parser(cls, subparsers):
+        parser = super(BasePermissionsSetup,
+                       cls).add_argument_parser(subparsers)
+        running_as_root = (os.geteuid() == 0)
+        parser.add_argument('--keystone-user', required=running_as_root)
+        parser.add_argument('--keystone-group', required=running_as_root)
+        return parser
+
+    @staticmethod
+    def get_user_group():
+        keystone_user_id = None
+        keystone_group_id = None
+
+        try:
+            a = CONF.command.keystone_user
+            if a:
+                keystone_user_id = utils.get_unix_user(a)[0]
+        except KeyError:
+            raise ValueError("Unknown user '%s' in --keystone-user" % a)
+
+        try:
+            a = CONF.command.keystone_group
+            if a:
+                keystone_group_id = utils.get_unix_group(a)[0]
+        except KeyError:
+            raise ValueError("Unknown group '%s' in --keystone-group" % a)
+
+        return keystone_user_id, keystone_group_id
+
+
+class BaseCertificateSetup(BasePermissionsSetup):
+    """Provides common options for certificate setup."""
+
+    @classmethod
+    def add_argument_parser(cls, subparsers):
+        parser = super(BaseCertificateSetup,
+                       cls).add_argument_parser(subparsers)
+        parser.add_argument('--rebuild', default=False, action='store_true',
+                            help=('Rebuild certificate files: erase previous '
+                                  'files and regenerate them.'))
+        return parser
+
+
+class PKISetup(BaseCertificateSetup):
+    """Set up Key pairs and certificates for token signing and verification.
+
+    This is NOT intended for production use, see Keystone Configuration
+    documentation for details.
+    """
+
+    name = 'pki_setup'
+
+    @classmethod
+    def main(cls):
+        LOG.warn(_LW('keystone-manage pki_setup is not recommended for '
+                     'production use.'))
+        keystone_user_id, keystone_group_id = cls.get_user_group()
+        conf_pki = openssl.ConfigurePKI(keystone_user_id, keystone_group_id,
+                                        rebuild=CONF.command.rebuild)
+        conf_pki.run()
+
+
+class SSLSetup(BaseCertificateSetup):
+    """Create key pairs and certificates for HTTPS connections.
+
+    This is NOT intended for production use, see Keystone Configuration
+    documentation for details.
+    """
+
+    name = 'ssl_setup'
+
+    @classmethod
+    def main(cls):
+        LOG.warn(_LW('keystone-manage ssl_setup is not recommended for '
+                     'production use.'))
+        keystone_user_id, keystone_group_id = cls.get_user_group()
+        conf_ssl = openssl.ConfigureSSL(keystone_user_id, keystone_group_id,
+                                        rebuild=CONF.command.rebuild)
+        conf_ssl.run()
+
+
+class FernetSetup(BasePermissionsSetup):
+    """Setup a key repository for Fernet tokens.
+
+    This also creates a primary key used for both creating and validating
+    Keystone Lightweight tokens. To improve security, you should rotate your
+    keys (using keystone-manage fernet_rotate, for example).
+
+    """
+
+    name = 'fernet_setup'
+
+    @classmethod
+    def main(cls):
+        keystone_user_id, keystone_group_id = cls.get_user_group()
+        fernet.create_key_directory(keystone_user_id, keystone_group_id)
+        if fernet.validate_key_repository():
+            fernet.initialize_key_repository(
+                keystone_user_id, keystone_group_id)
+
+
+class FernetRotate(BasePermissionsSetup):
+    """Rotate Fernet encryption keys.
+
+    This assumes you have already run keystone-manage fernet_setup.
+
+    A new primary key is placed into rotation, which is used for new tokens.
+    The old primary key is demoted to secondary, which can then still be used
+    for validating tokens. Excess secondary keys (beyond [fernet_tokens]
+    max_active_keys) are revoked. Revoked keys are permanently deleted. A new
+    staged key will be created and used to validate tokens. The next time key
+    rotation takes place, the staged key will be put into rotation as the
+    primary key.
+
+    Rotating keys too frequently, or with [fernet_tokens] max_active_keys set
+    too low, will cause tokens to become invalid prior to their expiration.
+
+    """
+
+    name = 'fernet_rotate'
+
+    @classmethod
+    def main(cls):
+        keystone_user_id, keystone_group_id = cls.get_user_group()
+        if fernet.validate_key_repository():
+            fernet.rotate_keys(keystone_user_id, keystone_group_id)
+
+
+class TokenFlush(BaseApp):
+    """Flush expired tokens from the backend."""
+
+    name = 'token_flush'
+
+    @classmethod
+    def main(cls):
+        token_manager = token.persistence.PersistenceManager()
+        token_manager.driver.flush_expired_tokens()
+
+
+class MappingPurge(BaseApp):
+    """Purge the mapping table."""
+
+    name = 'mapping_purge'
+
+    @classmethod
+    def add_argument_parser(cls, subparsers):
+        parser = super(MappingPurge, cls).add_argument_parser(subparsers)
+        parser.add_argument('--all', default=False, action='store_true',
+                            help=('Purge all mappings.'))
+        parser.add_argument('--domain-name', default=None,
+                            help=('Purge any mappings for the domain '
+                                  'specified.'))
+        parser.add_argument('--public-id', default=None,
+                            help=('Purge the mapping for the Public ID '
+                                  'specified.'))
+        parser.add_argument('--local-id', default=None,
+                            help=('Purge the mappings for the Local ID '
+                                  'specified.'))
+        parser.add_argument('--type', default=None, choices=['user', 'group'],
+                            help=('Purge any mappings for the type '
+                                  'specified.'))
+        return parser
+
+    @staticmethod
+    def main():
+        def validate_options():
+            # NOTE(henry-nash): It would be nice to use the argparse automated
+            # checking for this validation, but the only way I can see doing
+            # that is to make the default (i.e. if no optional parameters
+            # are specified) to purge all mappings - and that sounds too
+            # dangerous as a default.  So we use it in a slightly
+            # unconventional way, where all parameters are optional, but you
+            # must specify at least one.
+            if (CONF.command.all is False and
+                CONF.command.domain_name is None and
+                CONF.command.public_id is None and
+                CONF.command.local_id is None and
+                    CONF.command.type is None):
+                raise ValueError(_('At least one option must be provided'))
+
+            if (CONF.command.all is True and
+                (CONF.command.domain_name is not None or
+                 CONF.command.public_id is not None or
+                 CONF.command.local_id is not None or
+                 CONF.command.type is not None)):
+                raise ValueError(_('--all option cannot be mixed with '
+                                   'other options'))
+
+        def get_domain_id(name):
+            try:
+                identity.Manager()
+                # init assignment manager to avoid KeyError in resource.core
+                assignment.Manager()
+                resource_manager = resource.Manager()
+                return resource_manager.driver.get_domain_by_name(name)['id']
+            except KeyError:
+                raise ValueError(_("Unknown domain '%(name)s' specified by "
+                                   "--domain-name") % {'name': name})
+
+        validate_options()
+        # Now that we have validated the options, we know that at least one
+        # option has been specified, and if it was the --all option then this
+        # was the only option specified.
+        #
+        # The mapping dict is used to filter which mappings are purged, so
+        # leaving it empty means purge them all
+        mapping = {}
+        if CONF.command.domain_name is not None:
+            mapping['domain_id'] = get_domain_id(CONF.command.domain_name)
+        if CONF.command.public_id is not None:
+            mapping['public_id'] = CONF.command.public_id
+        if CONF.command.local_id is not None:
+            mapping['local_id'] = CONF.command.local_id
+        if CONF.command.type is not None:
+            mapping['type'] = CONF.command.type
+
+        mapping_manager = identity.MappingManager()
+        mapping_manager.driver.purge_mappings(mapping)
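+
+# Hedged CLI sketches for the mapping_purge options above (the domain
+# name is illustrative):
+#
+#   keystone-manage mapping_purge --all
+#   keystone-manage mapping_purge --domain-name Default --type user
+#
+# The first purges every mapping; the second builds the filter dict
+# {'domain_id': <id of Default>, 'type': 'user'}.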
+
+
+DOMAIN_CONF_FHEAD = 'keystone.'
+DOMAIN_CONF_FTAIL = '.conf'
+
+
+class DomainConfigUploadFiles(object):
+
+    def __init__(self):
+        super(DomainConfigUploadFiles, self).__init__()
+        self.load_backends()
+
+    def load_backends(self):
+        """Load the backends needed for uploading domain configs.
+
+        We only need the resource and domain_config managers, but there are
+        some dependencies which mean we have to load the assignment and
+        identity managers as well.
+
+        The order of loading the backends is important, since the resource
+        manager depends on the assignment manager, which in turn depends on
+        the identity manager.
+
+        """
+        identity.Manager()
+        assignment.Manager()
+        self.resource_manager = resource.Manager()
+        self.domain_config_manager = resource.DomainConfigManager()
+
+    def valid_options(self):
+        """Validate the options, returning True if they are indeed valid.
+
+        It would be nice to use the argparse automated checking for this
+        validation, but the only way I can see doing that is to make the
+        default (i.e. if no optional parameters are specified) to upload
+        all configuration files - and that sounds too dangerous as a
+        default. So we use it in a slightly unconventional way, where all
+        parameters are optional, but you must specify at least one.
+
+        """
+        if (CONF.command.all is False and
+                CONF.command.domain_name is None):
+            print(_('At least one option must be provided, use either '
+                    '--all or --domain-name'))
+            raise ValueError
+
+        if (CONF.command.all is True and
+                CONF.command.domain_name is not None):
+            print(_('The --all option cannot be used with '
+                    'the --domain-name option'))
+            raise ValueError
+
+    def upload_config_to_database(self, file_name, domain_name):
+        """Upload a single config file to the database.
+
+        :param file_name: the file containing the config options
+        :param domain_name: the domain name
+
+        :raises: ValueError: the domain does not exist or already has domain
+                             specific configurations defined
+        :raises: Exceptions from oslo config: there is an issue with options
+                                              defined in the config file or its
+                                              format
+
+        The caller of this method should catch the raised errors and handle
+        them appropriately, so that the best user experience is provided both
+        when a user has asked for a specific config file to be uploaded and
+        when all config files in a directory are uploaded.
+
+        """
+        try:
+            domain_ref = (
+                self.resource_manager.driver.get_domain_by_name(domain_name))
+        except exception.DomainNotFound:
+            print(_('Invalid domain name: %(domain)s found in config file '
+                    'name: %(file)s - ignoring this file.') % {
+                        'domain': domain_name,
+                        'file': file_name})
+            raise ValueError
+
+        if self.domain_config_manager.get_config_with_sensitive_info(
+                domain_ref['id']):
+            print(_('Domain: %(domain)s already has a configuration '
+                    'defined - ignoring file: %(file)s.') % {
+                        'domain': domain_name,
+                        'file': file_name})
+            raise ValueError
+
+        sections = {}
+        try:
+            parser = cfg.ConfigParser(file_name, sections)
+            parser.parse()
+        except Exception:
+            # We explicitly don't try and differentiate the error cases, in
+            # order to keep the code in this tool more robust as oslo.config
+            # changes.
+            print(_('Error parsing configuration file for domain: %(domain)s, '
+                    'file: %(file)s.') % {
+                        'domain': domain_name,
+                        'file': file_name})
+            raise
+
+        for group in sections:
+            for option in sections[group]:
+                sections[group][option] = sections[group][option][0]
+        self.domain_config_manager.create_config(domain_ref['id'], sections)
+
+    def upload_configs_to_database(self, file_name, domain_name):
+        """Upload configs from file and load into database.
+
+        This method will be called repeatedly for all the config files in the
+        config directory. To provide a better UX, we differentiate the error
+        handling in this case (versus when the user has asked for a single
+        config file to be uploaded).
+
+        """
+        try:
+            self.upload_config_to_database(file_name, domain_name)
+        except ValueError:
+            # We've already given all the info we can in a message, so carry
+            # on to the next one
+            pass
+        except Exception:
+            # Some other error occurred relating to this specific config file
+            # or domain. Since we are trying to upload all the config files,
+            # we'll continue and hide this exception. However, we tell the
+            # user how to get more info about this error by re-running with
+            # just the domain at fault. When we run in single-domain mode we
+            # will NOT hide the exception.
+            print(_('To get more detailed information on this error, re-run '
+                    'this command for the specific domain, i.e.: '
+                    'keystone-manage domain_config_upload --domain-name %s') %
+                  domain_name)
+
+    def read_domain_configs_from_files(self):
+        """Read configs from file(s) and load into database.
+
+        The command line parameters have already been parsed and the CONF
+        command option will have been set. It is either set to the name of an
+        explicit domain, or it's None to indicate that we want all domain
+        config files.
+
+        """
+        domain_name = CONF.command.domain_name
+        conf_dir = CONF.identity.domain_config_dir
+        if not os.path.exists(conf_dir):
+            print(_('Unable to locate domain config directory: %s') % conf_dir)
+            raise ValueError
+
+        if domain_name:
+            # Request is to upload the configs for just one domain
+            fname = DOMAIN_CONF_FHEAD + domain_name + DOMAIN_CONF_FTAIL
+            self.upload_config_to_database(
+                os.path.join(conf_dir, fname), domain_name)
+            return
+
+        # Request is to transfer all config files, so let's read all the
+        # files in the config directory, and transfer those that match the
+        # filename pattern of 'keystone.<domain_name>.conf'
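+        # (e.g. an illustrative file 'keystone.domainA.conf' yields the
+        # domain name 'domainA' once the head and tail are stripped below)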
+        for r, d, f in os.walk(conf_dir):
+            for fname in f:
+                if (fname.startswith(DOMAIN_CONF_FHEAD) and
+                        fname.endswith(DOMAIN_CONF_FTAIL)):
+                    if fname.count('.') >= 2:
+                        self.upload_configs_to_database(
+                            os.path.join(r, fname),
+                            fname[len(DOMAIN_CONF_FHEAD):
+                                  -len(DOMAIN_CONF_FTAIL)])
+                    else:
+                        LOG.warn(_LW('Ignoring file (%s) while scanning '
+                                     'domain config directory'), fname)
+
+    def run(self):
+        # First off, let's just check we can talk to the domain database
+        try:
+            self.resource_manager.driver.list_domains(driver_hints.Hints())
+        except Exception:
+            # It is likely that there is some SQL or other backend error
+            # related to setup.
+            print(_('Unable to access the keystone database, please check it '
+                    'is configured correctly.'))
+            raise
+
+        try:
+            self.valid_options()
+            self.read_domain_configs_from_files()
+        except ValueError:
+            # We will already have printed out a nice message, so indicate
+            # to the caller the non-success error code to be used.
+            return 1
+
+
+class DomainConfigUpload(BaseApp):
+    """Upload the domain specific configuration files to the database."""
+
+    name = 'domain_config_upload'
+
+    @classmethod
+    def add_argument_parser(cls, subparsers):
+        parser = super(DomainConfigUpload, cls).add_argument_parser(subparsers)
+        parser.add_argument('--all', default=False, action='store_true',
+                            help='Upload contents of all domain specific '
+                                 'configuration files. Either use this option '
+                                 'or use the --domain-name option to choose a '
+                                 'specific domain.')
+        parser.add_argument('--domain-name', default=None,
+                            help='Upload contents of the specific '
+                                 'configuration file for the given domain. '
+                                 'Either use this option or use the --all '
+                                 'option to upload contents for all domains.')
+        return parser
+
+    @staticmethod
+    def main():
+        dcu = DomainConfigUploadFiles()
+        status = dcu.run()
+        if status is not None:
+            exit(status)
+
+
+class SamlIdentityProviderMetadata(BaseApp):
+    """Generate Identity Provider metadata."""
+
+    name = 'saml_idp_metadata'
+
+    @staticmethod
+    def main():
+        # NOTE(marek-denis): Since federation is currently an extension,
+        # import the corresponding modules only when they are really going
+        # to be used.
+        from keystone.contrib.federation import idp
+        metadata = idp.MetadataGenerator().generate_metadata()
+        print(metadata.to_string())
+
+
+CMDS = [
+    DbSync,
+    DbVersion,
+    DomainConfigUpload,
+    FernetRotate,
+    FernetSetup,
+    MappingPurge,
+    PKISetup,
+    SamlIdentityProviderMetadata,
+    SSLSetup,
+    TokenFlush,
+]
+
+
+def add_command_parsers(subparsers):
+    for cmd in CMDS:
+        cmd.add_argument_parser(subparsers)
+
+
+command_opt = cfg.SubCommandOpt('command',
+                                title='Commands',
+                                help='Available commands',
+                                handler=add_command_parsers)
+
+
+def main(argv=None, config_files=None):
+    CONF.register_cli_opt(command_opt)
+
+    config.configure()
+    sql.initialize()
+    config.set_default_for_default_log_levels()
+
+    CONF(args=argv[1:],
+         project='keystone',
+         version=pbr.version.VersionInfo('keystone').version_string(),
+         usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
+         default_config_files=config_files)
+    config.setup_logging()
+    CONF.command.cmd_class.main()
diff --git a/keystone-moon/keystone/common/__init__.py b/keystone-moon/keystone/common/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/common/authorization.py b/keystone-moon/keystone/common/authorization.py
new file mode 100644 (file)
index 0000000..5cb1e63
--- /dev/null
@@ -0,0 +1,87 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 - 2012 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log
+
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone.models import token_model
+
+
+AUTH_CONTEXT_ENV = 'KEYSTONE_AUTH_CONTEXT'
+"""Environment variable used to convey the Keystone auth context.
+
+Auth context is essentially the user credential used for policy enforcement.
+It is a dictionary with the following attributes:
+
+* ``user_id``: user ID of the principal
+* ``project_id`` (optional): project ID of the scoped project if auth is
+                             project-scoped
+* ``domain_id`` (optional): domain ID of the scoped domain if auth is
+                            domain-scoped
+* ``roles`` (optional): list of role names for the given scope
+* ``group_ids``: list of group IDs for which the API user has membership
+
+"""
+
+LOG = log.getLogger(__name__)
+
+
+def token_to_auth_context(token):
+    if not isinstance(token, token_model.KeystoneToken):
+        raise exception.UnexpectedError(_('token reference must be a '
+                                          'KeystoneToken type, got: %s') %
+                                        type(token))
+    auth_context = {'token': token,
+                    'is_delegated_auth': False}
+    try:
+        auth_context['user_id'] = token.user_id
+    except KeyError:
+        LOG.warning(_LW('RBAC: Invalid user data in token'))
+        raise exception.Unauthorized()
+
+    if token.project_scoped:
+        auth_context['project_id'] = token.project_id
+    elif token.domain_scoped:
+        auth_context['domain_id'] = token.domain_id
+    else:
+        LOG.debug('RBAC: Proceeding without project or domain scope')
+
+    if token.trust_scoped:
+        auth_context['is_delegated_auth'] = True
+        auth_context['trust_id'] = token.trust_id
+        auth_context['trustor_id'] = token.trustor_user_id
+        auth_context['trustee_id'] = token.trustee_user_id
+    else:
+        auth_context['trust_id'] = None
+        auth_context['trustor_id'] = None
+        auth_context['trustee_id'] = None
+
+    roles = token.role_names
+    if roles:
+        auth_context['roles'] = roles
+
+    if token.oauth_scoped:
+        auth_context['is_delegated_auth'] = True
+    auth_context['consumer_id'] = token.oauth_consumer_id
+    auth_context['access_token_id'] = token.oauth_access_token_id
+
+    if token.is_federated_user:
+        auth_context['group_ids'] = token.federation_group_ids
+
+    return auth_context
diff --git a/keystone-moon/keystone/common/base64utils.py b/keystone-moon/keystone/common/base64utils.py
new file mode 100644 (file)
index 0000000..1a636f9
--- /dev/null
@@ -0,0 +1,396 @@
+# Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+
+Python provides the base64 module as a core module, but this is mostly
+limited to encoding and decoding base64 and its variants. It is often
+useful to be able to perform other operations on base64 text. This
+module is meant to be used in conjunction with the core base64 module.
+
+Standardized base64 is defined in
+RFC-4648 "The Base16, Base32, and Base64 Data Encodings".
+
+This module provides the following base64 utility functionality:
+
+    * test if text is valid base64
+    * filter formatting from base64
+    * convert base64 between different alphabets
+    * handle padding issues
+        - test if base64 is padded
+        - remove padding
+        - restore padding
+    * wrap base64 text into formatted blocks
+        - via iterator
+        - return formatted string
+
+"""
+
+import re
+import string
+
+import six
+from six.moves import urllib
+
+from keystone.i18n import _
+
+
+class InvalidBase64Error(ValueError):
+    pass
+
+base64_alphabet_re = re.compile(r'^[A-Za-z0-9+/=]+$')
+base64url_alphabet_re = re.compile(r'^[A-Za-z0-9---_=]+$')
+
+base64_non_alphabet_re = re.compile(r'[^A-Za-z0-9+/=]+')
+base64url_non_alphabet_re = re.compile(r'[^A-Za-z0-9---_=]+')
+
+_strip_formatting_re = re.compile(r'\s+')
+
+_base64_to_base64url_trans = string.maketrans('+/', '-_')
+_base64url_to_base64_trans = string.maketrans('-_', '+/')
+
+
+def _check_padding_length(pad):
+    if len(pad) != 1:
+        raise ValueError(_('pad must be single character'))
+
+
+def is_valid_base64(text):
+    """Test if input text can be base64 decoded.
+
+    :param text: input base64 text
+    :type text: string
+    :returns: bool -- True if text can be decoded as base64, False otherwise
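+
+    An illustrative example (hypothetical input)::
+
+        >>> is_valid_base64('ZGF0YQ==')
+        True
+        >>> is_valid_base64('ZGF0YQ')
+        False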
+    """
+
+    text = filter_formatting(text)
+
+    if base64_non_alphabet_re.search(text):
+        return False
+
+    try:
+        return base64_is_padded(text)
+    except InvalidBase64Error:
+        return False
+
+
+def is_valid_base64url(text):
+    """Test if input text can be base64url decoded.
+
+    :param text: input base64 text
+    :type text: string
+    :returns: bool -- True if text can be decoded as base64url,
+              False otherwise
+    """
+
+    text = filter_formatting(text)
+
+    if base64url_non_alphabet_re.search(text):
+        return False
+
+    try:
+        return base64_is_padded(text)
+    except InvalidBase64Error:
+        return False
+
+
+def filter_formatting(text):
+    """Return base64 text without any formatting, just the base64.
+
+    Base64 text is often formatted with whitespace, line endings,
+    etc. This function strips out any formatting, the result will
+    contain only base64 characters.
+
+    Note: this function does not filter out all non-base64 alphabet
+    characters; it only removes characters used for formatting.
+
+    :param text: input text to filter
+    :type text: string
+    :returns: string -- filtered text without formatting
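+
+    An illustrative example (hypothetical input)::
+
+        >>> filter_formatting('YWJj ZGVm')
+        'YWJjZGVm'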
+    """
+    return _strip_formatting_re.sub('', text)
+
+
+def base64_to_base64url(text):
+    """Convert base64 text to base64url text.
+
+    base64url text is designed to be safe for use in file names and
+    URLs. It is defined in RFC-4648 Section 5.
+
+    base64url differs from base64 in the last two alphabet characters
+    at indexes 62 and 63; these are sometimes referred to as the
+    altchars. The '+' character at index 62 is replaced by '-'
+    (hyphen) and the '/' character at index 63 is replaced by '_'
+    (underscore).
+
+    This function only translates the altchars; non-alphabet
+    characters are not filtered out.
+
+    WARNING::
+
+        base64url continues to use the '=' pad character which is NOT URL
+        safe. RFC-4648 suggests two alternate methods to deal with this:
+
+        percent-encode
+            percent-encode the pad character (e.g. '=' becomes
+            '%3D'). This makes the base64url text fully safe. But
+            percent-encoding has the downside of requiring
+            percent-decoding prior to feeding the base64url text into a
+            base64url decoder since most base64url decoders do not
+            recognize %3D as a pad character and most decoders require
+            correct padding.
+
+        no-padding
+            padding is not strictly necessary to decode base64 or
+            base64url text; the pad can be computed from the input text
+            length. However, many decoders demand padding and will consider
+            non-padded text to be malformed. If one wants to omit the
+            trailing pad character(s) for use in URLs, it can be added back
+            using the base64_assure_padding() function.
+
+        This function makes no decisions about which padding methodology to
+        use. One can either call base64_strip_padding() to remove any pad
+        characters (restoring later with base64_assure_padding()) or call
+        base64url_percent_encode() to percent-encode the pad characters.
+
+    :param text: input base64 text
+    :type text: string
+    :returns: string -- base64url text
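+
+    An illustrative example (hypothetical input)::
+
+        >>> base64_to_base64url('ab+/cd==')
+        'ab-_cd=='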
+    """
+    return text.translate(_base64_to_base64url_trans)
+
+
+def base64url_to_base64(text):
+    """Convert base64url text to base64 text.
+
+    See base64_to_base64url() for a description of base64url text and
+    its issues.
+
+    This function does NOT handle percent-encoded pad characters; they
+    will be left intact. If the input base64url text is
+    percent-encoded you should call base64url_percent_decode() first.
+
+    :param text: text in base64url alphabet
+    :type text: string
+    :returns: string -- text in base64 alphabet
+
+    """
+    return text.translate(_base64url_to_base64_trans)
+
+
+def base64_is_padded(text, pad='='):
+    """Test if the text is base64 padded.
+
+    The input text must be in a base64 alphabet. The pad must be a
+    single character. If the text has been percent-encoded (e.g. pad
+    is the string '%3D') you must convert the text back to a base64
+    alphabet (e.g. if percent-encoded use the function
+    base64url_percent_decode()).
+
+    :param text: text containing ONLY characters in a base64 alphabet
+    :type text: string
+    :param pad: pad character (must be single character) (default: '=')
+    :type pad: string
+    :returns: bool -- True if padded, False otherwise
+    :raises: ValueError, InvalidBase64Error
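+
+    Illustrative examples (hypothetical input)::
+
+        >>> base64_is_padded('ZGF0YQ==')
+        True
+        >>> base64_is_padded('ZGF0YQ')
+        False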
+    """
+
+    _check_padding_length(pad)
+
+    text_len = len(text)
+    if text_len > 0 and text_len % 4 == 0:
+        pad_index = text.find(pad)
+        if pad_index >= 0 and pad_index < text_len - 2:
+            raise InvalidBase64Error(_('text is multiple of 4, '
+                                       'but pad "%s" occurs before '
+                                       '2nd to last char') % pad)
+        if pad_index == text_len - 2 and text[-1] != pad:
+            raise InvalidBase64Error(_('text is multiple of 4, '
+                                       'but pad "%s" occurs before '
+                                       'non-pad last char') % pad)
+        return True
+
+    if text.find(pad) >= 0:
+        raise InvalidBase64Error(_('text is not a multiple of 4, '
+                                   'but contains pad "%s"') % pad)
+    return False
+
+
+def base64url_percent_encode(text):
+    """Percent-encode base64url padding.
+
+    The input text should only contain base64url alphabet
+    characters. Any non-base64url alphabet characters will also be
+    subject to percent-encoding.
+
+    :param text: text containing ONLY characters in the base64url alphabet
+    :type text: string
+    :returns: string -- percent-encoded base64url text
+    :raises: InvalidBase64Error
+    """
+
+    if len(text) % 4 != 0:
+        raise InvalidBase64Error(_('padded base64url text must be '
+                                   'multiple of 4 characters'))
+
+    return urllib.parse.quote(text)
+
+
+def base64url_percent_decode(text):
+    """Percent-decode base64url padding.
+
+    The input text should only contain base64url alphabet
+    characters and the percent-encoded pad character. Any other
+    percent-encoded characters will be subject to percent-decoding.
+
+    :param text: base64url alphabet text
+    :type text: string
+    :returns: string -- percent-decoded base64url text
+    """
+
+    decoded_text = urllib.parse.unquote(text)
+
+    if len(decoded_text) % 4 != 0:
+        raise InvalidBase64Error(_('padded base64url text must be '
+                                   'multiple of 4 characters'))
+
+    return decoded_text
+
+
+def base64_strip_padding(text, pad='='):
+    """Remove padding from input base64 text.
+
+    :param text: text containing ONLY characters in a base64 alphabet
+    :type text: string
+    :param pad: pad character (must be single character) (default: '=')
+    :type pad: string
+    :returns: string -- base64 text without padding
+    :raises: ValueError
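+
+    An illustrative example (hypothetical input)::
+
+        >>> base64_strip_padding('ZGF0YQ==')
+        'ZGF0YQ'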
+    """
+    _check_padding_length(pad)
+
+    # Can't be padded if text is less than 4 characters.
+    if len(text) < 4:
+        return text
+
+    if text[-1] == pad:
+        if text[-2] == pad:
+            return text[0:-2]
+        else:
+            return text[0:-1]
+    else:
+        return text
+
+
+def base64_assure_padding(text, pad='='):
+    """Assure the input text ends with padding.
+
+    Base64 text is normally expected to be a multiple of 4
+    characters. Each 4 character base64 sequence produces 3 octets of
+    binary data. If the binary data is not a multiple of 3 the base64
+    text is padded at the end with a pad character such that it is
+    always a multiple of 4. Padding is ignored and does not alter the
+    binary data nor its length.
+
+    In some circumstances it is desirable to omit the padding
+    character due to transport encoding conflicts. Base64 text can
+    still be correctly decoded if the length of the base64 text
+    (consisting only of characters in the desired base64 alphabet) is
+    known; padding is not absolutely necessary.
+
+    Some base64 decoders demand correct padding, or one may wish to
+    format RFC-compliant base64; this function performs this action.
+
+    Input is assumed to consist only of members of a base64
+    alphabet (i.e. no whitespace).
+
+    Use the filter_formatting() function to assure the input text
+    contains only the members of the alphabet.
+
+    If the text ends with the pad it is assumed to already be
+    padded. Otherwise the binary length is computed from the input
+    text length and correct number of pad characters are appended.
+
+    :param text: text containing ONLY characters in a base64 alphabet
+    :type text: string
+    :param pad: pad character (must be single character) (default: '=')
+    :type pad: string
+    :returns: string -- input base64 text with padding
+    :raises: ValueError
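+
+    An illustrative example (hypothetical input)::
+
+        >>> base64_assure_padding('ZGF0YQ')
+        'ZGF0YQ=='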
+    """
+    _check_padding_length(pad)
+
+    if text.endswith(pad):
+        return text
+
+    n = len(text) % 4
+    if n == 0:
+        return text
+
+    n = 4 - n
+    padding = pad * n
+    return text + padding
+
+
+def base64_wrap_iter(text, width=64):
+    """Fold text into lines of text with max line length.
+
+    Input is assumed to consist only of members of a base64
+    alphabet (i.e. no whitespace). Iteration yields a sequence of lines.
+    The line does NOT terminate with a line ending.
+
+    Use the filter_formatting() function to assure the input text
+    contains only the members of the alphabet.
+
+    :param text: text containing ONLY characters in a base64 alphabet
+    :type text: string
+    :param width: number of characters in each wrapped line (default: 64)
+    :type width: int
+    :returns: generator -- sequence of lines of base64 text.
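+
+    An illustrative example (hypothetical input)::
+
+        >>> for line in base64_wrap_iter('YWJjZGVmZ2g=', width=4):
+        ...     print(line)
+        YWJj
+        ZGVm
+        Z2g=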
+    """
+
+    text = six.text_type(text)
+    for x in six.moves.range(0, len(text), width):
+        yield text[x:x + width]
+
+
+def base64_wrap(text, width=64):
+    """Fold text into lines of text with max line length.
+
+    Input is assumed to consist only of members of a base64
+    alphabet (i.e. no whitespace). Fold the text into lines that are
+    width characters long, terminate each line with a line
+    ending (default is '\\n'), and return the wrapped text as a single
+    string.
+
+    Use the filter_formatting() function to assure the input text
+    contains only the members of the alphabet.
+
+    :param text: text containing ONLY characters in a base64 alphabet
+    :type text: string
+    :param width: number of characters in each wrapped line (default: 64)
+    :type width: int
+    :returns: string -- wrapped text.
+    """
+
+    buf = six.StringIO()
+
+    for line in base64_wrap_iter(text, width):
+        buf.write(line)
+        buf.write(u'\n')
+
+    text = buf.getvalue()
+    buf.close()
+    return text
diff --git a/keystone-moon/keystone/common/cache/__init__.py b/keystone-moon/keystone/common/cache/__init__.py
new file mode 100644 (file)
index 0000000..4950239
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.cache.core import *  # noqa
diff --git a/keystone-moon/keystone/common/cache/_memcache_pool.py b/keystone-moon/keystone/common/cache/_memcache_pool.py
new file mode 100644 (file)
index 0000000..b15332d
--- /dev/null
@@ -0,0 +1,233 @@
+# Copyright 2014 Mirantis Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Thread-safe connection pool for python-memcached."""
+
+# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware
+# and should be kept in sync until we can use an external library for this.
+
+import collections
+import contextlib
+import itertools
+import logging
+import threading
+import time
+
+import memcache
+from oslo_log import log
+from six.moves import queue
+
+from keystone import exception
+from keystone.i18n import _
+
+
+LOG = log.getLogger(__name__)
+
+# This 'class' is taken from http://stackoverflow.com/a/22520633/238308
+# Don't inherit client from threading.local so that we can reuse clients in
+# different threads
+_MemcacheClient = type('_MemcacheClient', (object,),
+                       dict(memcache.Client.__dict__))
+
+_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection'])
+
+
+class ConnectionPool(queue.Queue):
+    """Base connection pool class
+
+    This class implements the basic connection pool logic as an abstract base
+    class.
+    """
+    def __init__(self, maxsize, unused_timeout, conn_get_timeout=None):
+        """Initialize the connection pool.
+
+        :param maxsize: maximum number of client connections for the pool
+        :type maxsize: int
+        :param unused_timeout: idle time to live for unused clients (in
+                               seconds). If a client connection object has been
+                               in the pool and idle for longer than the
+                               unused_timeout, it will be reaped. This is to
+                               ensure resources are released as utilization
+                               goes down.
+        :type unused_timeout: int
+        :param conn_get_timeout: maximum time in seconds to wait for a
+                                 connection. If set to `None` timeout is
+                                 indefinite.
+        :type conn_get_timeout: int
+        """
+        # super() cannot be used here because Queue in stdlib is an
+        # old-style class
+        queue.Queue.__init__(self, maxsize)
+        self._unused_timeout = unused_timeout
+        self._connection_get_timeout = conn_get_timeout
+        self._acquired = 0
+
+    def _create_connection(self):
+        """Returns a connection instance.
+
+        This is called when the pool needs another instance created.
+
+        :returns: a new connection instance
+
+        """
+        raise NotImplementedError
+
+    def _destroy_connection(self, conn):
+        """Destroy and cleanup a connection instance.
+
+        This is called when the pool wishes to get rid of an existing
+        connection. This is the opportunity for a subclass to free up
+        resources and clean up after itself.
+
+        :param conn: the connection object to destroy
+
+        """
+        raise NotImplementedError
+
+    def _debug_logger(self, msg, *args, **kwargs):
+        if LOG.isEnabledFor(logging.DEBUG):
+            thread_id = threading.current_thread().ident
+            args = (id(self), thread_id) + args
+            prefix = 'Memcached pool %s, thread %s: '
+            LOG.debug(prefix + msg, *args, **kwargs)
+
+    @contextlib.contextmanager
+    def acquire(self):
+        self._debug_logger('Acquiring connection')
+        try:
+            conn = self.get(timeout=self._connection_get_timeout)
+        except queue.Empty:
+            raise exception.UnexpectedError(
+                _('Unable to get a connection from pool id %(id)s after '
+                  '%(seconds)s seconds.') %
+                {'id': id(self), 'seconds': self._connection_get_timeout})
+        self._debug_logger('Acquired connection %s', id(conn))
+        try:
+            yield conn
+        finally:
+            self._debug_logger('Releasing connection %s', id(conn))
+            self._drop_expired_connections()
+            try:
+                # super() cannot be used here because Queue in stdlib is an
+                # old-style class
+                queue.Queue.put(self, conn, block=False)
+            except queue.Full:
+                self._debug_logger('Reaping surplus connection %s', id(conn))
+                self._destroy_connection(conn)
+
+    def _qsize(self):
+        if self.maxsize:
+            return self.maxsize - self._acquired
+        else:
+            # A value indicating there is always a free connection
+            # if maxsize is None or 0
+            return 1
+
+    # NOTE(dstanek): stdlib and eventlet Queue implementations
+    # have different names for the qsize method. This ensures
+    # that we override both of them.
+    if not hasattr(queue.Queue, '_qsize'):
+        qsize = _qsize
+
+    def _get(self):
+        if self.queue:
+            conn = self.queue.pop().connection
+        else:
+            conn = self._create_connection()
+        self._acquired += 1
+        return conn
+
+    def _drop_expired_connections(self):
+        """Drop all expired connections from the right end of the queue."""
+        now = time.time()
+        while self.queue and self.queue[0].ttl < now:
+            conn = self.queue.popleft().connection
+            self._debug_logger('Reaping connection %s', id(conn))
+            self._destroy_connection(conn)
+
+    def _put(self, conn):
+        self.queue.append(_PoolItem(
+            ttl=time.time() + self._unused_timeout,
+            connection=conn,
+        ))
+        self._acquired -= 1
+
+
+class MemcacheClientPool(ConnectionPool):
+    def __init__(self, urls, arguments, **kwargs):
+        # super() cannot be used here because Queue in stdlib is an
+        # old-style class
+        ConnectionPool.__init__(self, **kwargs)
+        self.urls = urls
+        self._arguments = arguments
+        # NOTE(morganfainberg): The host objects expect an int for the
+        # deaduntil value. Initialize this at 0 for each host with 0 indicating
+        # the host is not dead.
+        self._hosts_deaduntil = [0] * len(urls)
+
+    def _create_connection(self):
+        return _MemcacheClient(self.urls, **self._arguments)
+
+    def _destroy_connection(self, conn):
+        conn.disconnect_all()
+
+    def _get(self):
+        # super() cannot be used here because Queue in stdlib is an
+        # old-style class
+        conn = ConnectionPool._get(self)
+        try:
+            # Propagate host state known to us to this client's list
+            now = time.time()
+            for deaduntil, host in zip(self._hosts_deaduntil, conn.servers):
+                if deaduntil > now and host.deaduntil <= now:
+                    host.mark_dead('propagating death mark from the pool')
+                host.deaduntil = deaduntil
+        except Exception:
+            # We need to be sure that connection doesn't leak from the pool.
+            # This code runs before we enter context manager's try-finally
+            # block, so we need to explicitly release it here.
+            # super() cannot be used here because Queue in stdlib is an
+            # old-style class
+            ConnectionPool._put(self, conn)
+            raise
+        return conn
+
+    def _put(self, conn):
+        try:
+            # If this client found that one of the hosts is dead, mark it as
+            # such in our internal list
+            now = time.time()
+            for i, host in zip(itertools.count(), conn.servers):
+                deaduntil = self._hosts_deaduntil[i]
+                # Do nothing if we already know this host is dead
+                if deaduntil <= now:
+                    if host.deaduntil > now:
+                        self._hosts_deaduntil[i] = host.deaduntil
+                        self._debug_logger(
+                            'Marked host %s dead until %s',
+                            self.urls[i], host.deaduntil)
+                    else:
+                        self._hosts_deaduntil[i] = 0
+            # If all hosts are dead we should forget that they're dead. This
+            # way we won't get completely shut off until dead_retry seconds
+            # pass, but we will be checking servers as frequently as we can
+            # (over a much smaller socket_timeout).
+            if all(deaduntil > now for deaduntil in self._hosts_deaduntil):
+                self._debug_logger('All hosts are dead. Marking them as live.')
+                self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil)
+        finally:
+            # super() cannot be used here because Queue in stdlib is an
+            # old-style class
+            ConnectionPool._put(self, conn)
diff --git a/keystone-moon/keystone/common/cache/backends/__init__.py b/keystone-moon/keystone/common/cache/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/common/cache/backends/memcache_pool.py b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
new file mode 100644 (file)
index 0000000..f3990b1
--- /dev/null
@@ -0,0 +1,61 @@
+# Copyright 2014 Mirantis Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""dogpile.cache backend that uses Memcached connection pool"""
+
+import functools
+import logging
+
+from dogpile.cache.backends import memcached as memcached_backend
+
+from keystone.common.cache import _memcache_pool
+
+
+LOG = logging.getLogger(__name__)
+
+
+# Helper to ease backend refactoring
+class ClientProxy(object):
+    def __init__(self, client_pool):
+        self.client_pool = client_pool
+
+    def _run_method(self, __name, *args, **kwargs):
+        with self.client_pool.acquire() as client:
+            return getattr(client, __name)(*args, **kwargs)
+
+    def __getattr__(self, name):
+        return functools.partial(self._run_method, name)
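+
+    # Illustrative use: ClientProxy(pool).set('key', 'value') acquires a
+    # pooled client, invokes client.set('key', 'value') on it, and returns
+    # the client to the pool when _run_method's context manager exits.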
+
+
+class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
+    # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__
+    def __init__(self, arguments):
+        super(PooledMemcachedBackend, self).__init__(arguments)
+        self.client_pool = _memcache_pool.MemcacheClientPool(
+            self.url,
+            arguments={
+                'dead_retry': arguments.get('dead_retry', 5 * 60),
+                'socket_timeout': arguments.get('socket_timeout', 3),
+            },
+            maxsize=arguments.get('pool_maxsize', 10),
+            unused_timeout=arguments.get('pool_unused_timeout', 60),
+            conn_get_timeout=arguments.get('pool_connection_get_timeout', 10),
+        )
+
+    # Since all methods in the backend just call one of the client's
+    # methods, this lets us avoid the need to hack it too much.
+    @property
+    def client(self):
+        return ClientProxy(self.client_pool)
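+
+
+# A hedged configuration sketch: this backend is normally selected via
+# keystone's [cache] options rather than instantiated directly; the values
+# below are illustrative:
+#
+#     [cache]
+#     backend = keystone.cache.memcache_pool
+#     backend_argument = url:localhost:11211
+#     backend_argument = pool_maxsize:10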
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
new file mode 100644 (file)
index 0000000..b5de9bc
--- /dev/null
@@ -0,0 +1,557 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import datetime
+
+from dogpile.cache import api
+from dogpile.cache import util as dp_util
+from oslo_log import log
+from oslo_utils import importutils
+from oslo_utils import timeutils
+import six
+
+from keystone import exception
+from keystone.i18n import _, _LW
+
+
+NO_VALUE = api.NO_VALUE
+LOG = log.getLogger(__name__)
+
+
+class MongoCacheBackend(api.CacheBackend):
+    """A MongoDB based caching backend implementing dogpile backend APIs.
+
+    Arguments accepted in the arguments dictionary:
+
+    :param db_hosts: string (required), hostname or IP address of the
+        MongoDB server instance. This can be a single MongoDB connection URI,
+        or a list of MongoDB connection URIs.
+
+    :param db_name: string (required), the name of the database to be used.
+
+    :param cache_collection: string (required), the name of the collection in
+        which to store cached data.
+        *Note:* A different collection name can be provided if there is a need
+        to create a separate container (i.e. collection) for cache data, so
+        region configuration is done per collection.
+
+    The following are optional parameters for MongoDB backend configuration:
+
+    :param username: string, the name of the user to authenticate.
+
+    :param password: string, the password of the user to authenticate.
+
+    :param max_pool_size: integer, the maximum number of connections that the
+        pool will open simultaneously. By default the pool size is 10.
+
+    :param w: integer, write acknowledgement for MongoDB client
+
+        If not provided, then no default is set on MongoDB and write
+        acknowledgement behavior occurs as per the MongoDB default. This
+        parameter name is the same as the one used in the MongoDB docs. This
+        value is specified at the collection level, so it is applicable to
+        `cache_collection` db write operations.
+
+        If this is a replica set, write operations will block until they have
+        been replicated to the specified number or tagged set of servers.
+        Setting w=0 disables write acknowledgement and all other write concern
+        options.
+
+    :param read_preference: string, the read preference mode for the MongoDB
+        client. Expected value is ``primary``, ``primaryPreferred``,
+        ``secondary``, ``secondaryPreferred``, or ``nearest``. This
+        read_preference is specified at the collection level, so it is
+        applicable to `cache_collection` db read operations.
+
+    :param use_replica: boolean, flag to indicate whether a replica client is
+        to be used. Default is `False`. A `replicaset_name` value is required
+        if `True`.
+
+    :param replicaset_name: string, name of the replica set.
+        Required if `use_replica` is `True`.
+
+    :param son_manipulator: string, name of class with module name which
+        implements MongoDB SONManipulator.
+        Default manipulator used is :class:`.BaseTransform`.
+
+        This manipulator is added per database. In multiple cache
+        configurations, the manipulator name should be the same if the same
+        database name ``db_name`` is used in those configurations.
+
+        SONManipulator is used to manipulate custom data types as they are
+        saved or retrieved from MongoDB. A custom implementation is only
+        needed if the cached data is a custom class and needs transformations
+        when saving or reading from the db. If the dogpile cached value
+        contains built-in data types, then the BaseTransform class is
+        sufficient as it already handles the dogpile CachedValue class
+        transformation.
+
+    :param mongo_ttl_seconds: integer, interval in seconds to indicate maximum
+        time-to-live value.
+        If the value is greater than 0, then it is assumed that
+        cache_collection needs to be of TTL type (has an index on the
+        'doc_date' field).
+        By default, the value is -1 and TTL is disabled.
+        Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
+
+        .. NOTE::
+
+            This parameter is different from Dogpile's own
+            expiration_time, which is the number of seconds after which Dogpile
+            will consider the value to be expired. When Dogpile considers a
+            value to be expired, it continues to use the value until generation
+            of a new value is complete, when using CacheRegion.get_or_create().
+            Therefore, if you are setting `mongo_ttl_seconds`, you will want to
+            make sure it is greater than expiration_time by at least enough
+            seconds for new values to be generated, else the value would not
+            be available during a regeneration, forcing all threads to wait for
+            a regeneration each time a value expires.
+
+    :param ssl: boolean, if True, create the connection to the server
+        using SSL. Default is `False`. Client SSL connection parameters depend
+        on the server-side SSL setup. For further reference on SSL
+        configuration:
+        <http://docs.mongodb.org/manual/tutorial/configure-ssl/>
+
+    :param ssl_keyfile: string, the private keyfile used to identify the
+        local connection against mongod. If included with the certfile then
+        only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
+
+    :param ssl_certfile: string, the certificate file used to identify the
+        local connection against mongod. Used only when `ssl` is `True`.
+
+    :param ssl_ca_certs: string, the ca_certs file contains a set of
+        concatenated 'certification authority' certificates, which are used to
+        validate certificates passed from the other end of the connection.
+        Used only when `ssl` is `True`.
+
+    :param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
+        a certificate is required from the other side of the connection, and
+        whether it will be validated if provided. It must be one of the three
+        values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
+        (not required, but validated if provided), or
+        ``ssl.CERT_REQUIRED`` (required and validated). If the value of this
+        parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
+        must point to a file of CA certificates. Used only when `ssl`
+        is `True`.
+
+    The rest of the arguments are passed to the mongo calls for read, write
+    and remove, so related options can be specified for these operations.
+
+    Further details of the various supported arguments can be found at
+    <http://api.mongodb.org/python/current/api/pymongo/>
+
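+    An illustrative configuration (all values hypothetical)::
+
+        arguments = {
+            'db_hosts': 'localhost:27017',
+            'db_name': 'ks_cache',
+            'cache_collection': 'cache',
+            'mongo_ttl_seconds': 300,
+        }
+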
+    """
+
+    def __init__(self, arguments):
+        self.api = MongoApi(arguments)
+
+    @dp_util.memoized_property
+    def client(self):
+        """Initializes MongoDB connection and collection defaults.
+
+        This initialization is done only once and performed as part of the
+        lazy inclusion of the MongoDB dependency, i.e. imports are added only
+        if the related backend is used.
+
+        :return: :class:`.MongoApi` instance
+        """
+        self.api.get_cache_collection()
+        return self.api
+
+    def get(self, key):
+        value = self.client.get(key)
+        if value is None:
+            return NO_VALUE
+        else:
+            return value
+
+    def get_multi(self, keys):
+        values = self.client.get_multi(keys)
+        return [
+            NO_VALUE if key not in values
+            else values[key] for key in keys
+        ]
+
+    def set(self, key, value):
+        self.client.set(key, value)
+
+    def set_multi(self, mapping):
+        self.client.set_multi(mapping)
+
+    def delete(self, key):
+        self.client.delete(key)
+
+    def delete_multi(self, keys):
+        self.client.delete_multi(keys)
+
+
+class MongoApi(object):
+    """Class handling MongoDB specific functionality.
+
+    This class uses PyMongo APIs internally to create the database connection
+    with the configured pool size, ensures a unique index on the key, does
+    database authentication, and ensures a TTL collection index if so
+    configured.
+    This class also serves as a handle to the cache collection for dogpile
+    cache APIs.
+
+    In a single deployment, multiple cache configurations can be defined. In
+    the case of multiple cache collections usage, the db client connection
+    pool is shared when the cache collections are within the same database.
+    """
+
+    # class level attributes for re-use of db client connection and collection
+    _DB = {}  # dict of db_name: db connection reference
+    _MONGO_COLLS = {}  # dict of cache_collection : db collection reference
+
+    def __init__(self, arguments):
+        self._init_args(arguments)
+        self._data_manipulator = None
+
+    def _init_args(self, arguments):
+        """Helper logic for collecting and parsing MongoDB specific arguments.
+
+        The arguments passed in are separated out into connection specific
+        settings, and the rest of the arguments are passed to the
+        create/update/delete db operations.
+        """
+        self.conn_kwargs = {}  # connection specific arguments
+
+        self.hosts = arguments.pop('db_hosts', None)
+        if self.hosts is None:
+            msg = _('db_hosts value is required')
+            raise exception.ValidationError(message=msg)
+
+        self.db_name = arguments.pop('db_name', None)
+        if self.db_name is None:
+            msg = _('database db_name is required')
+            raise exception.ValidationError(message=msg)
+
+        self.cache_collection = arguments.pop('cache_collection', None)
+        if self.cache_collection is None:
+            msg = _('cache_collection name is required')
+            raise exception.ValidationError(message=msg)
+
+        self.username = arguments.pop('username', None)
+        self.password = arguments.pop('password', None)
+        self.max_pool_size = arguments.pop('max_pool_size', 10)
+
+        self.w = arguments.pop('w', -1)
+        try:
+            self.w = int(self.w)
+        except ValueError:
+            msg = _('integer value expected for w (write concern attribute)')
+            raise exception.ValidationError(message=msg)
+
+        self.read_preference = arguments.pop('read_preference', None)
+
+        self.use_replica = arguments.pop('use_replica', False)
+        if self.use_replica:
+            if arguments.get('replicaset_name') is None:
+                msg = _('replicaset_name required when use_replica is True')
+                raise exception.ValidationError(message=msg)
+            self.replicaset_name = arguments.get('replicaset_name')
+
+        self.son_manipulator = arguments.pop('son_manipulator', None)
+
+        # Set if the mongo collection needs to be of TTL type.
+        # This needs to be the max ttl for any cache entry.
+        # By default, -1 means don't use a TTL collection.
+        # With ttl set, it creates the related index and has a doc_date
+        # field with the needed expiration interval.
+        self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
+        try:
+            self.ttl_seconds = int(self.ttl_seconds)
+        except ValueError:
+            msg = _('integer value expected for mongo_ttl_seconds')
+            raise exception.ValidationError(message=msg)
+
+        self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
+        if self.conn_kwargs['ssl']:
+            ssl_keyfile = arguments.pop('ssl_keyfile', None)
+            ssl_certfile = arguments.pop('ssl_certfile', None)
+            ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
+            ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
+            if ssl_keyfile:
+                self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
+            if ssl_certfile:
+                self.conn_kwargs['ssl_certfile'] = ssl_certfile
+            if ssl_ca_certs:
+                self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
+            if ssl_cert_reqs:
+                self.conn_kwargs['ssl_cert_reqs'] = (
+                    self._ssl_cert_req_type(ssl_cert_reqs))
+
+        # the rest of the arguments are passed to the mongo crud calls
+        self.meth_kwargs = arguments
+
+    def _ssl_cert_req_type(self, req_type):
+        try:
+            import ssl
+        except ImportError:
+            raise exception.ValidationError(_('no ssl support available'))
+        req_type = req_type.upper()
+        try:
+            return {
+                'NONE': ssl.CERT_NONE,
+                'OPTIONAL': ssl.CERT_OPTIONAL,
+                'REQUIRED': ssl.CERT_REQUIRED
+            }[req_type]
+        except KeyError:
+            msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
+                    '"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
+            raise exception.ValidationError(message=msg)
+
+    def _get_db(self):
+        # defer imports until backend is used
+        global pymongo
+        import pymongo
+        if self.use_replica:
+            connection = pymongo.MongoReplicaSetClient(
+                host=self.hosts, replicaSet=self.replicaset_name,
+                max_pool_size=self.max_pool_size, **self.conn_kwargs)
+        else:  # used for standalone node or mongos in sharded setup
+            connection = pymongo.MongoClient(
+                host=self.hosts, max_pool_size=self.max_pool_size,
+                **self.conn_kwargs)
+
+        database = getattr(connection, self.db_name)
+
+        self._assign_data_manipulator()
+        database.add_son_manipulator(self._data_manipulator)
+        if self.username and self.password:
+            database.authenticate(self.username, self.password)
+        return database
+
+    def _assign_data_manipulator(self):
+        if self._data_manipulator is None:
+            if self.son_manipulator:
+                self._data_manipulator = importutils.import_object(
+                    self.son_manipulator)
+            else:
+                self._data_manipulator = BaseTransform()
+
+    def _get_doc_date(self):
+        if self.ttl_seconds > 0:
+            expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
+            doc_date = timeutils.utcnow() + expire_delta
+        else:
+            doc_date = timeutils.utcnow()
+        return doc_date
+
+    def get_cache_collection(self):
+        if self.cache_collection not in self._MONGO_COLLS:
+            global pymongo
+            import pymongo
+            # re-use db client connection if already defined as part of
+            # earlier dogpile cache configuration
+            if self.db_name not in self._DB:
+                self._DB[self.db_name] = self._get_db()
+            coll = getattr(self._DB[self.db_name], self.cache_collection)
+
+            self._assign_data_manipulator()
+            if self.read_preference:
+                self.read_preference = pymongo.read_preferences.mongos_enum(
+                    self.read_preference)
+                coll.read_preference = self.read_preference
+            if self.w > -1:
+                coll.write_concern['w'] = self.w
+            if self.ttl_seconds > 0:
+                kwargs = {'expireAfterSeconds': self.ttl_seconds}
+                coll.ensure_index('doc_date', cache_for=5, **kwargs)
+            else:
+                self._validate_ttl_index(coll, self.cache_collection,
+                                         self.ttl_seconds)
+            self._MONGO_COLLS[self.cache_collection] = coll
+
+        return self._MONGO_COLLS[self.cache_collection]
+
+    def _get_cache_entry(self, key, value, meta, doc_date):
+        """MongoDB cache data representation.
+
+        The cache key is stored in the ``_id`` field, as MongoDB by default
+        creates a unique index on this field, so there is no need to create a
+        separate field and index for storing the cache key. Cache data has an
+        additional ``doc_date`` field for MongoDB TTL collection support.
+        """
+        return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
+
+    def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
+        """Checks if existing TTL index is removed on a collection.
+
+        This logs warning when existing collection has TTL index defined and
+        new cache configuration tries to disable index with
+        ``mongo_ttl_seconds < 0``. In that case, existing index needs
+        to be addressed first to make new configuration effective.
+        Refer to MongoDB documentation around TTL index for further details.
+        """
+        indexes = collection.index_information()
+        for indx_name, index_data in six.iteritems(indexes):
+            if all(k in index_data for k in ('key', 'expireAfterSeconds')):
+                existing_value = index_data['expireAfterSeconds']
+                fld_present = 'doc_date' in index_data['key'][0]
+                if fld_present and existing_value > -1 and ttl_seconds < 1:
+                    msg = _LW('TTL index already exists on db collection '
+                              '<%(c_name)s>, remove index <%(indx_name)s> '
+                              'first to make the updated mongo_ttl_seconds '
+                              'value effective')
+                    LOG.warn(msg, {'c_name': coll_name,
+                                   'indx_name': indx_name})
+
+    def get(self, key):
+        criteria = {'_id': key}
+        result = self.get_cache_collection().find_one(spec_or_id=criteria,
+                                                      **self.meth_kwargs)
+        if result:
+            return result['value']
+        else:
+            return None
+
+    def get_multi(self, keys):
+        db_results = self._get_results_as_dict(keys)
+        return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)}
+
+    def _get_results_as_dict(self, keys):
+        criteria = {'_id': {'$in': keys}}
+        db_results = self.get_cache_collection().find(spec=criteria,
+                                                      **self.meth_kwargs)
+        return {doc['_id']: doc for doc in db_results}
+
+    def set(self, key, value):
+        doc_date = self._get_doc_date()
+        ref = self._get_cache_entry(key, value.payload, value.metadata,
+                                    doc_date)
+        spec = {'_id': key}
+        # find and modify does not have manipulator support
+        # so need to do conversion as part of input document
+        ref = self._data_manipulator.transform_incoming(ref, self)
+        self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
+                                                    **self.meth_kwargs)
+
+    def set_multi(self, mapping):
+        """Insert multiple documents specified as key, value pairs.
+
+        In this case, multiple documents can be added via insert, provided
+        they do not exist.
+        Updates of multiple existing documents are done one by one.
+        """
+        doc_date = self._get_doc_date()
+        insert_refs = []
+        update_refs = []
+        existing_docs = self._get_results_as_dict(mapping.keys())
+        for key, value in mapping.items():
+            ref = self._get_cache_entry(key, value.payload, value.metadata,
+                                        doc_date)
+            if key in existing_docs:
+                ref['_id'] = existing_docs[key]['_id']
+                update_refs.append(ref)
+            else:
+                insert_refs.append(ref)
+        if insert_refs:
+            self.get_cache_collection().insert(insert_refs, manipulate=True,
+                                               **self.meth_kwargs)
+        for upd_doc in update_refs:
+            self.get_cache_collection().save(upd_doc, manipulate=True,
+                                             **self.meth_kwargs)
+
+    def delete(self, key):
+        criteria = {'_id': key}
+        self.get_cache_collection().remove(spec_or_id=criteria,
+                                           **self.meth_kwargs)
+
+    def delete_multi(self, keys):
+        criteria = {'_id': {'$in': keys}}
+        self.get_cache_collection().remove(spec_or_id=criteria,
+                                           **self.meth_kwargs)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class AbstractManipulator(object):
+    """Abstract class with methods which need to be implemented for custom
+    manipulation.
+
+    Adding this as a base class for :class:`.BaseTransform` instead of adding
+    import dependency of pymongo specific class i.e.
+    `pymongo.son_manipulator.SONManipulator` and using that as base class.
+    This is done to avoid pymongo dependency if MongoDB backend is not used.
+    """
+    @abc.abstractmethod
+    def transform_incoming(self, son, collection):
+        """Used while saving data to MongoDB.
+
+        :param son: the SON object to be inserted into the database
+        :param collection: the collection the object is being inserted into
+
+        :returns: transformed SON object
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def transform_outgoing(self, son, collection):
+        """Used while reading data from MongoDB.
+
+        :param son: the SON object being retrieved from the database
+        :param collection: the collection this object was stored in
+
+        :returns: transformed SON object
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def will_copy(self):
+        """Will this SON manipulator make a copy of the incoming document?
+
+        Derived classes that do need to make a copy should override this
+        method, returning `True` instead of `False`.
+
+        :returns: boolean
+        """
+        return False
+
+
+class BaseTransform(AbstractManipulator):
+    """Base transformation class to store and read dogpile cached data
+    from MongoDB.
+
+    This is needed because dogpile internally stores data as a custom
+    class, i.e. ``dogpile.cache.api.CachedValue``.
+
+    Note: a custom manipulator must always override both
+    ``transform_incoming`` and ``transform_outgoing``; pymongo's
+    manipulator logic explicitly checks that the overridden methods in an
+    instance and its super class are different.
+    """
+
+    def transform_incoming(self, son, collection):
+        """Used while saving data to MongoDB."""
+        for (key, value) in son.items():
+            if isinstance(value, api.CachedValue):
+                son[key] = value.payload  # key is 'value' field here
+                son['meta'] = value.metadata
+            elif isinstance(value, dict):  # Make sure we recurse into sub-docs
+                son[key] = self.transform_incoming(value, collection)
+        return son
+
+    def transform_outgoing(self, son, collection):
+        """Used while reading data from MongoDB."""
+        metadata = None
+        # make sure it's a top-level dictionary with all expected field
+        # names present
+        if isinstance(son, dict) and all(k in son for k in
+                                         ('_id', 'value', 'meta', 'doc_date')):
+            payload = son.pop('value', None)
+            metadata = son.pop('meta', None)
+        for (key, value) in son.items():
+            if isinstance(value, dict):
+                son[key] = self.transform_outgoing(value, collection)
+        if metadata is not None:
+            son['value'] = api.CachedValue(payload, metadata)
+        return son
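+
+# Illustrative round-trip through the manipulator (a sketch; the field names
+# follow the cache-entry layout used above, and ``api.CachedValue`` is the
+# dogpile type stored by the region -- a datetime import is assumed):
+#
+#     transform = BaseTransform()
+#     doc = {'_id': 'k1', 'value': api.CachedValue('payload', {'ct': 1.0}),
+#            'doc_date': datetime.datetime.utcnow()}
+#     doc = transform.transform_incoming(doc, collection=None)
+#     # 'value' now holds the raw payload and 'meta' the metadata
+#     doc = transform.transform_outgoing(doc, collection=None)
+#     # 'value' is an api.CachedValue instance again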
diff --git a/keystone-moon/keystone/common/cache/backends/noop.py b/keystone-moon/keystone/common/cache/backends/noop.py
new file mode 100644 (file)
index 0000000..38329c9
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dogpile.cache import api
+
+
+NO_VALUE = api.NO_VALUE
+
+
+class NoopCacheBackend(api.CacheBackend):
+    """A no op backend as a default caching backend.
+
+    The no op backend is provided as the default caching backend for keystone
+    to ensure that ``dogpile.cache.memory`` is not used in any real-world
+    circumstances unintentionally.  ``dogpile.cache.memory`` does not have a
+    mechanism to cleanup it's internal dict and therefore could cause run-away
+    memory utilization.
+    """
+    def __init__(self, *args):
+        return
+
+    def get(self, key):
+        return NO_VALUE
+
+    def get_multi(self, keys):
+        return [NO_VALUE for x in keys]
+
+    def set(self, key, value):
+        return
+
+    def set_multi(self, mapping):
+        return
+
+    def delete(self, key):
+        return
+
+    def delete_multi(self, keys):
+        return
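+
+# Sketch of the intended behavior: writes are discarded and reads always
+# miss, so decorated callers always recompute --
+#
+#     backend = NoopCacheBackend()
+#     backend.set('k', 'v')
+#     assert backend.get('k') is NO_VALUE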
diff --git a/keystone-moon/keystone/common/cache/core.py b/keystone-moon/keystone/common/cache/core.py
new file mode 100644 (file)
index 0000000..306587b
--- /dev/null
@@ -0,0 +1,308 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone Caching Layer Implementation."""
+
+import dogpile.cache
+from dogpile.cache import proxy
+from dogpile.cache import util
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import importutils
+
+from keystone import exception
+from keystone.i18n import _, _LE
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+make_region = dogpile.cache.make_region
+
+dogpile.cache.register_backend(
+    'keystone.common.cache.noop',
+    'keystone.common.cache.backends.noop',
+    'NoopCacheBackend')
+
+dogpile.cache.register_backend(
+    'keystone.cache.mongo',
+    'keystone.common.cache.backends.mongo',
+    'MongoCacheBackend')
+
+dogpile.cache.register_backend(
+    'keystone.cache.memcache_pool',
+    'keystone.common.cache.backends.memcache_pool',
+    'PooledMemcachedBackend')
+
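+# Illustrative [cache] configuration selecting one of the backends
+# registered above (a sketch; argument names follow the MongoDB backend's
+# options, and the host/database values are placeholders):
+#
+#     [cache]
+#     enabled = true
+#     backend = keystone.cache.mongo
+#     backend_argument = db_hosts:localhost:27017
+#     backend_argument = db_name:ks_cache
+#     backend_argument = cache_collection:cache
+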
+
+class DebugProxy(proxy.ProxyBackend):
+    """Extra Logging ProxyBackend."""
+    # NOTE(morganfainberg): Pass all key/values through repr to ensure we have
+    # a clean description of the information.  Without use of repr, it might
+    # be possible to run into encode/decode error(s). For logging/debugging
+    # purposes encode/decode is irrelevant and we should be looking at the
+    # data exactly as it stands.
+
+    def get(self, key):
+        value = self.proxied.get(key)
+        LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
+                  {'key': key, 'value': value})
+        return value
+
+    def get_multi(self, keys):
+        values = self.proxied.get_multi(keys)
+        LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
+                  {'keys': keys, 'values': values})
+        return values
+
+    def set(self, key, value):
+        LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
+                  {'key': key, 'value': value})
+        return self.proxied.set(key, value)
+
+    def set_multi(self, mapping):
+        LOG.debug('CACHE_SET_MULTI: "%r"', mapping)
+        self.proxied.set_multi(mapping)
+
+    def delete(self, key):
+        self.proxied.delete(key)
+        LOG.debug('CACHE_DELETE: "%r"', key)
+
+    def delete_multi(self, keys):
+        LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
+        self.proxied.delete_multi(keys)
+
+
+def build_cache_config():
+    """Build the cache region dictionary configuration.
+
+    :returns: dict
+    """
+    prefix = CONF.cache.config_prefix
+    conf_dict = {}
+    conf_dict['%s.backend' % prefix] = CONF.cache.backend
+    conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
+    for argument in CONF.cache.backend_argument:
+        try:
+            (argname, argvalue) = argument.split(':', 1)
+        except ValueError:
+            msg = _LE('Unable to build cache config-key. Expected format '
+                      '"<argname>:<value>". Skipping unknown format: %s')
+            LOG.error(msg, argument)
+            continue
+
+        arg_key = '.'.join([prefix, 'arguments', argname])
+        conf_dict[arg_key] = argvalue
+
+    LOG.debug('Keystone Cache Config: %s', conf_dict)
+    # NOTE(yorik-sar): these arguments will be used for memcache-related
+    # backends. Use setdefault for url to support old-style setting through
+    # backend_argument=url:127.0.0.1:11211
+    conf_dict.setdefault('%s.arguments.url' % prefix,
+                         CONF.cache.memcache_servers)
+    for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
+                'pool_unused_timeout', 'pool_connection_get_timeout'):
+        value = getattr(CONF.cache, 'memcache_' + arg)
+        conf_dict['%s.arguments.%s' % (prefix, arg)] = value
+
+    return conf_dict
+
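+# Illustrative result with the default config_prefix of 'cache.keystone'
+# and backend_argument = url:127.0.0.1:11211 (only a few keys shown):
+#
+#     {'cache.keystone.backend': 'keystone.common.cache.noop',
+#      'cache.keystone.expiration_time': 600,
+#      'cache.keystone.arguments.url': '127.0.0.1:11211',
+#      'cache.keystone.arguments.dead_retry': 300,
+#      ...}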
+
+def configure_cache_region(region):
+    """Configure a cache region.
+
+    :param region: the CacheRegion object to configure; a new region is
+                   not created for you
+    :raises: exception.ValidationError if region is not a
+             dogpile.cache.CacheRegion instance
+    :returns: dogpile.cache.CacheRegion
+    """
+    if not isinstance(region, dogpile.cache.CacheRegion):
+        raise exception.ValidationError(
+            _('region not type dogpile.cache.CacheRegion'))
+
+    if not region.is_configured:
+        # NOTE(morganfainberg): this is how you tell if a region is configured.
+        # There is a request logged with dogpile.cache upstream to make this
+        # easier / less ugly.
+
+        config_dict = build_cache_config()
+        region.configure_from_config(config_dict,
+                                     '%s.' % CONF.cache.config_prefix)
+
+        if CONF.cache.debug_cache_backend:
+            region.wrap(DebugProxy)
+
+        # NOTE(morganfainberg): if the backend requests the use of a
+        # key_mangler, we should respect that key_mangler function.  If a
+        # key_mangler is not defined by the backend, use the sha1_mangle_key
+        # mangler provided by dogpile.cache. This ensures we always use a fixed
+        # size cache-key.
+        if region.key_mangler is None:
+            region.key_mangler = util.sha1_mangle_key
+
+        for class_path in CONF.cache.proxies:
+            # NOTE(morganfainberg): if we have any proxy wrappers, we should
+            # ensure they are added to the cache region's backend.  Since
+            # configure_from_config doesn't handle the wrap argument, we need
+            # to manually add the Proxies. For information on how the
+            # ProxyBackends work, see the dogpile.cache documents on
+            # "changing-backend-behavior"
+            cls = importutils.import_class(class_path)
+            LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
+            region.wrap(cls)
+
+    return region
+
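+# Typical usage (sketch):
+#
+#     region = make_region(function_key_generator=function_key_generator)
+#     configure_cache_region(region)
+#     region.set('key', 'value')  # a no-op under the default noop backend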
+
+def get_should_cache_fn(section):
+    """Build a function that returns a config section's caching status.
+
+    For any given driver in keystone that has caching capabilities, a boolean
+    config option for that driver's section (e.g. ``token``) should exist and
+    default to ``True``.  This function will use that value to tell the caching
+    decorator if caching for that driver is enabled.  To properly use this
+    with the decorator, pass this function the configuration section and assign
+    the result to a variable.  Pass the new variable to the caching decorator
+    as the named argument ``should_cache_fn``.  e.g.::
+
+        from keystone.common import cache
+
+        SHOULD_CACHE = cache.get_should_cache_fn('token')
+
+        @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
+        def function(arg1, arg2):
+            ...
+
+    :param section: name of the configuration section to examine
+    :type section: string
+    :returns: function reference
+    """
+    def should_cache(value):
+        if not CONF.cache.enabled:
+            return False
+        conf_group = getattr(CONF, section)
+        return getattr(conf_group, 'caching', True)
+    return should_cache
+
+
+def get_expiration_time_fn(section):
+    """Build a function that returns a config section's expiration time status.
+
+    For any given driver in keystone that has caching capabilities, an int
+    config option called ``cache_time`` for that driver's section
+    (e.g. ``token``) should exist and typically default to ``None``. This
+    function will use that value to tell the caching decorator of the TTL
+    override for caching the resulting objects. If the value of the config
+    option is ``None`` the default value provided in the
+    ``[cache] expiration_time`` option will be used by the decorator. The
+    default may be set to something other than ``None`` in cases where the
+    caching TTL should not be tied to the global default(s) (e.g.
+    revocation_list changes very infrequently and can be cached for >1h by
+    default).
+
+    To properly use this with the decorator, pass this function the
+    configuration section and assign the result to a variable. Pass the new
+    variable to the caching decorator as the named argument
+    ``expiration_time``.  e.g.::
+
+        from keystone.common import cache
+
+        EXPIRATION_TIME = cache.get_expiration_time_fn('token')
+
+        @cache.on_arguments(expiration_time=EXPIRATION_TIME)
+        def function(arg1, arg2):
+            ...
+
+    :param section: name of the configuration section to examine
+    :type section: string
+    :rtype: function reference
+    """
+    def get_expiration_time():
+        conf_group = getattr(CONF, section)
+        return getattr(conf_group, 'cache_time', None)
+    return get_expiration_time
+
+
+def key_generate_to_str(s):
+    # NOTE(morganfainberg): Since we need to stringify all arguments, attempt
+    # to stringify and handle the Unicode error explicitly as needed.
+    try:
+        return str(s)
+    except UnicodeEncodeError:
+        return s.encode('utf-8')
+
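+# For example (sketch): key_generate_to_str(u'caf\xe9') is u'caf\xe9' under
+# Python 3, while under Python 2 str() raises UnicodeEncodeError and the
+# UTF-8 encoded bytes are returned instead.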
+
+def function_key_generator(namespace, fn, to_str=key_generate_to_str):
+    # NOTE(morganfainberg): This wraps dogpile.cache's default
+    # function_key_generator to change the default to_str mechanism.
+    return util.function_key_generator(namespace, fn, to_str=to_str)
+
+
+REGION = dogpile.cache.make_region(
+    function_key_generator=function_key_generator)
+on_arguments = REGION.cache_on_arguments
+
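+# Sketch: memoizing a function against the shared module-level region --
+#
+#     @on_arguments(expiration_time=60)
+#     def expensive_lookup(arg):
+#         ...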
+
+def get_memoization_decorator(section, expiration_section=None):
+    """Build a function based on the `on_arguments` decorator for the section.
+
+    For any given driver in Keystone that has caching capabilities, a
+    pair of functions is required to properly determine the status of the
+    caching capabilities (a toggle to indicate caching is enabled and any
+    override of the default TTL for cached data). This function will return
+    an object that has the memoization decorator ``on_arguments``
+    pre-configured for the driver.
+
+    Example usage::
+
+        from keystone.common import cache
+
+        MEMOIZE = cache.get_memoization_decorator(section='token')
+
+        @MEMOIZE
+        def function(arg1, arg2):
+            ...
+
+
+        ALTERNATE_MEMOIZE = cache.get_memoization_decorator(
+            section='token', expiration_section='revoke')
+
+        @ALTERNATE_MEMOIZE
+        def function2(arg1, arg2):
+            ...
+
+    :param section: name of the configuration section to examine
+    :type section: string
+    :param expiration_section: name of the configuration section to examine
+                               for the expiration option. This will fall back
+                               to using ``section`` if the value is unspecified
+                               or ``None``
+    :type expiration_section: string
+    :rtype: function reference
+    """
+    if expiration_section is None:
+        expiration_section = section
+    should_cache = get_should_cache_fn(section)
+    expiration_time = get_expiration_time_fn(expiration_section)
+
+    memoize = REGION.cache_on_arguments(should_cache_fn=should_cache,
+                                        expiration_time=expiration_time)
+
+    # Make sure the actual "should_cache" and "expiration_time" methods are
+    # available. This is potentially interesting/useful to pre-seed cache
+    # values.
+    memoize.should_cache = should_cache
+    memoize.get_expiration_time = expiration_time
+
+    return memoize
diff --git a/keystone-moon/keystone/common/config.py b/keystone-moon/keystone/common/config.py
new file mode 100644 (file)
index 0000000..bcaedee
--- /dev/null
@@ -0,0 +1,1118 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import oslo_messaging
+
+
+_DEFAULT_AUTH_METHODS = ['external', 'password', 'token', 'oauth1']
+_CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem'
+_KEYFILE = '/etc/keystone/ssl/private/signing_key.pem'
+_SSO_CALLBACK = '/etc/keystone/sso_callback_template.html'
+
+
+FILE_OPTIONS = {
+    None: [
+        cfg.StrOpt('admin_token', secret=True, default='ADMIN',
+                   help='A "shared secret" that can be used to bootstrap '
+                        'Keystone. This "token" does not represent a user, '
+                        'and carries no explicit authorization. To disable '
+                        'in production (highly recommended), remove '
+                        'AdminTokenAuthMiddleware from your paste '
+                        'application pipelines (for example, in '
+                        'keystone-paste.ini).'),
+        cfg.IntOpt('compute_port', default=8774,
+                   help='(Deprecated) The port which the OpenStack Compute '
+                        'service listens on. This option was only used for '
+                        'string replacement in the templated catalog backend. '
+                        'Templated catalogs should replace the '
+                        '"$(compute_port)s" substitution with the static port '
+                        'of the compute service. As of Juno, this option is '
+                        'deprecated and will be removed in the L release.'),
+        cfg.StrOpt('public_endpoint',
+                   help='The base public endpoint URL for Keystone that is '
+                        'advertised to clients (NOTE: this does NOT affect '
+                        'how Keystone listens for connections). '
+                        'Defaults to the base host URL of the request. E.g. a '
+                        'request to http://server:5000/v3/users will '
+                        'default to http://server:5000. You should only need '
+                        'to set this value if the base URL contains a path '
+                        '(e.g. /prefix/v3) or the endpoint should be found '
+                        'on a different server.'),
+        cfg.StrOpt('admin_endpoint',
+                   help='The base admin endpoint URL for Keystone that is '
+                        'advertised to clients (NOTE: this does NOT affect '
+                        'how Keystone listens for connections). '
+                        'Defaults to the base host URL of the request. E.g. a '
+                        'request to http://server:35357/v3/users will '
+                        'default to http://server:35357. You should only need '
+                        'to set this value if the base URL contains a path '
+                        '(e.g. /prefix/v3) or the endpoint should be found '
+                        'on a different server.'),
+        cfg.IntOpt('max_project_tree_depth', default=5,
+                   help='Maximum depth of the project hierarchy. WARNING: '
+                        'setting it to a large value may adversely impact '
+                        'performance.'),
+        cfg.IntOpt('max_param_size', default=64,
+                   help='Limit the sizes of user & project ID/names.'),
+        # we allow tokens to be a bit larger to accommodate PKI
+        cfg.IntOpt('max_token_size', default=8192,
+                   help='Similar to max_param_size, but provides an '
+                        'exception for token values.'),
+        cfg.StrOpt('member_role_id',
+                   default='9fe2ff9ee4384b1894a90878d3e92bab',
+                   help='Similar to the member_role_name option, this '
+                        'represents the default role ID used to associate '
+                        'users with their default projects in the v2 API. '
+                        'This will be used as the explicit role where one is '
+                        'not specified by the v2 API.'),
+        cfg.StrOpt('member_role_name', default='_member_',
+                   help='This is the role name used in combination with the '
+                        'member_role_id option; see that option for more '
+                        'detail.'),
+        cfg.IntOpt('crypt_strength', default=40000,
+                   help='The value passed as the keyword "rounds" to '
+                        'passlib\'s encrypt method.'),
+        cfg.IntOpt('list_limit',
+                   help='The maximum number of entities that will be '
+                        'returned in a collection, with no limit set by '
+                        'default. This global limit may be then overridden '
+                        'for a specific driver, by specifying a list_limit '
+                        'in the appropriate section (e.g. [assignment]).'),
+        cfg.BoolOpt('domain_id_immutable', default=True,
+                    help='Set this to false if you want to enable the '
+                         'ability for user, group and project entities '
+                         'to be moved between domains by updating their '
+                         'domain_id. Allowing such movement is not '
+                         'recommended if the scope of a domain admin is being '
+                         'restricted by use of an appropriate policy file '
+                         '(see policy.v3cloudsample as an example).'),
+        cfg.BoolOpt('strict_password_check', default=False,
+                    help='If set to true, strict password length checking is '
+                         'performed for password manipulation. If a password '
+                         'exceeds the maximum length, the operation will fail '
+                         'with an HTTP 403 Forbidden error. If set to false, '
+                         'passwords are automatically truncated to the '
+                         'maximum length.'),
+        cfg.StrOpt('secure_proxy_ssl_header',
+                   help='The HTTP header used to determine the scheme for the '
+                        'original request, even if it was removed by an SSL '
+                        'terminating proxy. Typical value is '
+                        '"HTTP_X_FORWARDED_PROTO".'),
+    ],
+    'identity': [
+        cfg.StrOpt('default_domain_id', default='default',
+                   help='This references the domain to use for all '
+                        'Identity API v2 requests (which are not aware of '
+                        'domains). A domain with this ID will be created '
+                        'for you by keystone-manage db_sync in migration '
+                        '008. The domain referenced by this ID cannot be '
+                        'deleted on the v3 API, to prevent accidentally '
+                        'breaking the v2 API. There is nothing special about '
+                        'this domain, other than the fact that it must '
+                        'exist in order to maintain support for your v2 '
+                        'clients.'),
+        cfg.BoolOpt('domain_specific_drivers_enabled',
+                    default=False,
+                    help='A subset (or all) of domains can have their own '
+                         'identity driver, each with their own partial '
+                         'configuration options, stored in either the '
+                         'resource backend or in a file in a domain '
+                         'configuration directory (depending on the setting '
+                         'of domain_configurations_from_database). Only '
+                         'values specific to the domain need to be specified '
+                         'in this manner. This feature is disabled by '
+                         'default; set to true to enable.'),
+        cfg.BoolOpt('domain_configurations_from_database',
+                    default=False,
+                    help='Extract the domain specific configuration options '
+                         'from the resource backend where they have been '
+                         'stored with the domain data. This feature is '
+                         'disabled by default (in which case the domain '
+                         'specific options will be loaded from files in the '
+                         'domain configuration directory); set to true to '
+                         'enable.'),
+        cfg.StrOpt('domain_config_dir',
+                   default='/etc/keystone/domains',
+                   help='Path for Keystone to locate the domain specific '
+                        'identity configuration files if '
+                        'domain_specific_drivers_enabled is set to true.'),
+        cfg.StrOpt('driver',
+                   default=('keystone.identity.backends'
+                            '.sql.Identity'),
+                   help='Identity backend driver.'),
+        cfg.BoolOpt('caching', default=True,
+                    help='Toggle for identity caching. This has no '
+                         'effect unless global caching is enabled.'),
+        cfg.IntOpt('cache_time', default=600,
+                   help='Time to cache identity data (in seconds). This has '
+                        'no effect unless global and identity caching are '
+                        'enabled.'),
+        cfg.IntOpt('max_password_length', default=4096,
+                   help='Maximum supported length for user passwords; '
+                        'decrease to improve performance.'),
+        cfg.IntOpt('list_limit',
+                   help='Maximum number of entities that will be returned in '
+                        'an identity collection.'),
+    ],
+    'identity_mapping': [
+        cfg.StrOpt('driver',
+                   default=('keystone.identity.mapping_backends'
+                            '.sql.Mapping'),
+                   help='Keystone Identity Mapping backend driver.'),
+        cfg.StrOpt('generator',
+                   default=('keystone.identity.id_generators'
+                            '.sha256.Generator'),
+                   help='Public ID generator for user and group entities. '
+                        'The Keystone identity mapper only supports '
+                        'generators that produce no more than 64 characters.'),
+        cfg.BoolOpt('backward_compatible_ids',
+                    default=True,
+                    help='The format of user and group IDs changed '
+                         'in Juno for backends that do not generate UUIDs '
+                         '(e.g. LDAP), with keystone providing a hash mapping '
+                         'to the underlying attribute in LDAP. By default '
+                         'this mapping is disabled, which ensures that '
+                         'existing IDs will not change. Even when the '
+                         'mapping is enabled by using domain specific '
+                         'drivers, any users and groups from the default '
+                         'domain being handled by LDAP will still not be '
+                         'mapped to ensure their IDs remain backward '
+                         'compatible. Setting this value to False will '
+                         'enable the mapping for even the default LDAP '
+                         'driver. It is only safe to do this if you do not '
+                         'already have assignments for users and '
+                         'groups from the default LDAP domain, and it is '
+                         'acceptable for Keystone to provide different '
+                         'IDs to clients than it did previously.  Typically '
+                         'this means that the only time you can set this '
+                         'value to False is when configuring a fresh '
+                         'installation.'),
+    ],
+    'trust': [
+        cfg.BoolOpt('enabled', default=True,
+                    help='Delegation and impersonation features can be '
+                         'optionally disabled.'),
+        cfg.BoolOpt('allow_redelegation', default=False,
+                    help='Enable redelegation feature.'),
+        cfg.IntOpt('max_redelegation_count', default=3,
+                   help='Maximum depth of trust redelegation.'),
+        cfg.StrOpt('driver',
+                   default='keystone.trust.backends.sql.Trust',
+                   help='Trust backend driver.')],
+    'os_inherit': [
+        cfg.BoolOpt('enabled', default=False,
+                    help='Role-assignment inheritance to projects from '
+                         'owning domain or from projects higher in the '
+                         'hierarchy can be optionally enabled.'),
+    ],
+    'fernet_tokens': [
+        cfg.StrOpt('key_repository',
+                   default='/etc/keystone/fernet-keys/',
+                   help='Directory containing Fernet token keys.'),
+        cfg.IntOpt('max_active_keys',
+                   default=3,
+                   help='This controls how many keys are held in rotation by '
+                        'keystone-manage fernet_rotate before they are '
+                        'discarded. The default value of 3 means that '
+                        'keystone will maintain one staged key, one primary '
+                        'key, and one secondary key. Increasing this value '
+                        'means that additional secondary keys will be kept in '
+                        'the rotation.'),
+    ],
+    'token': [
+        cfg.ListOpt('bind', default=[],
+                    help='External auth mechanisms that should add bind '
+                         'information to token, e.g., kerberos,x509.'),
+        cfg.StrOpt('enforce_token_bind', default='permissive',
+                   help='Enforcement policy on tokens presented to Keystone '
+                        'with bind information. One of disabled, permissive, '
+                        'strict, required or a specifically required bind '
+                        'mode, e.g., kerberos or x509 to require binding to '
+                        'that authentication.'),
+        cfg.IntOpt('expiration', default=3600,
+                   help='Amount of time a token should remain valid '
+                        '(in seconds).'),
+        cfg.StrOpt('provider',
+                   default='keystone.token.providers.uuid.Provider',
+                   help='Controls the token construction, validation, and '
+                        'revocation operations. Core providers are '
+                        '"keystone.token.providers.[fernet|pkiz|pki|uuid].'
+                        'Provider".'),
+        cfg.StrOpt('driver',
+                   default='keystone.token.persistence.backends.sql.Token',
+                   help='Token persistence backend driver.'),
+        cfg.BoolOpt('caching', default=True,
+                    help='Toggle for token system caching. This has no '
+                         'effect unless global caching is enabled.'),
+        cfg.IntOpt('cache_time',
+                   help='Time to cache tokens (in seconds). This has no '
+                        'effect unless global and token caching are '
+                        'enabled.'),
+        cfg.BoolOpt('revoke_by_id', default=True,
+                    help='Revoke token by token identifier. Setting '
+                    'revoke_by_id to true enables various forms of '
+                    'enumerating tokens, e.g. `list tokens for user`. '
+                    'These enumerations are processed to determine the '
+                    'list of tokens to revoke. Only disable if you are '
+                    'switching to using the Revoke extension with a '
+                    'backend other than KVS, which stores events in memory.'),
+        cfg.BoolOpt('allow_rescope_scoped_token', default=True,
+                    help='Allow rescoping of scoped token. Setting '
+                    'allow_rescoped_scoped_token to false prevents a user '
+                    'from exchanging a scoped token for any other token.'),
+        cfg.StrOpt('hash_algorithm', default='md5',
+                   help="The hash algorithm to use for PKI tokens. This can "
+                        "be set to any algorithm that hashlib supports. "
+                        "WARNING: Before changing this value, the auth_token "
+                        "middleware must be configured with the "
+                        "hash_algorithms, otherwise token revocation will "
+                        "not be processed correctly."),
+    ],
+    'revoke': [
+        cfg.StrOpt('driver',
+                   default='keystone.contrib.revoke.backends.sql.Revoke',
+                   help='An implementation of the backend for persisting '
+                        'revocation events.'),
+        cfg.IntOpt('expiration_buffer', default=1800,
+                   help='This value (calculated in seconds) is added to token '
+                        'expiration before a revocation event may be removed '
+                        'from the backend.'),
+        cfg.BoolOpt('caching', default=True,
+                    help='Toggle for revocation event caching. This has no '
+                         'effect unless global caching is enabled.'),
+        cfg.IntOpt('cache_time', default=3600,
+                   help='Time to cache the revocation list and the revocation '
+                        'events (in seconds). This has no effect unless '
+                        'global and token caching are enabled.',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'revocation_cache_time', group='token')]),
+    ],
+    'cache': [
+        cfg.StrOpt('config_prefix', default='cache.keystone',
+                   help='Prefix for building the configuration dictionary '
+                        'for the cache region. This should not need to be '
+                        'changed unless there is another dogpile.cache '
+                        'region with the same configuration name.'),
+        cfg.IntOpt('expiration_time', default=600,
+                   help='Default TTL, in seconds, for any cached item in '
+                        'the dogpile.cache region. This applies to any '
+                        'cached method that doesn\'t have an explicit '
+                        'cache expiration time defined for it.'),
+        # NOTE(morganfainberg): dogpile.cache.memory is acceptable in
+        # devstack and other such single-process/thread deployments. Running
+        # dogpile.cache.memory in any other configuration has the same pitfalls
+        # as the KVS token backend. It is recommended that either Redis or
+        # Memcached are used as the dogpile backend for real workloads. To
+        # prevent issues with the memory cache ending up in "production"
+        # unintentionally, we register a no-op as the keystone default caching
+        # backend.
+        cfg.StrOpt('backend', default='keystone.common.cache.noop',
+                   help='Dogpile.cache backend module. It is recommended '
+                        'that Memcache with pooling '
+                        '(keystone.cache.memcache_pool) or Redis '
+                        '(dogpile.cache.redis) be used in production '
+                        'deployments.  Small workloads (single process) '
+                        'like devstack can use the dogpile.cache.memory '
+                        'backend.'),
+        cfg.MultiStrOpt('backend_argument', default=[],
+                        help='Arguments supplied to the backend module. '
+                             'Specify this option once per argument to be '
+                             'passed to the dogpile.cache backend. Example '
+                             'format: "<argname>:<value>".'),
+        cfg.ListOpt('proxies', default=[],
+                    help='Proxy classes to import that will affect the way '
+                         'the dogpile.cache backend functions. See the '
+                         'dogpile.cache documentation on '
+                         'changing-backend-behavior.'),
+        cfg.BoolOpt('enabled', default=False,
+                    help='Global toggle for all caching using the '
+                         'should_cache_fn mechanism.'),
+        cfg.BoolOpt('debug_cache_backend', default=False,
+                    help='Extra debugging from the cache backend (cache '
+                         'keys, get/set/delete/etc calls). This is only '
+                         'really useful if you need to see the specific '
+                         'cache-backend get/set/delete calls with the '
+                         'keys/values.  Typically this should be left set '
+                         'to false.'),
+        cfg.ListOpt('memcache_servers', default=['localhost:11211'],
+                    help='Memcache servers in the format of "host:port".'
+                    ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
+                    ' backends only).'),
+        cfg.IntOpt('memcache_dead_retry',
+                   default=5 * 60,
+                   help='Number of seconds memcached server is considered dead'
+                   ' before it is tried again. (dogpile.cache.memcache and'
+                   ' keystone.cache.memcache_pool backends only).'),
+        cfg.IntOpt('memcache_socket_timeout',
+                   default=3,
+                   help='Timeout in seconds for every call to a server.'
+                   ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
+                   ' backends only).'),
+        cfg.IntOpt('memcache_pool_maxsize',
+                   default=10,
+                   help='Max total number of open connections to every'
+                   ' memcached server. (keystone.cache.memcache_pool backend'
+                   ' only).'),
+        cfg.IntOpt('memcache_pool_unused_timeout',
+                   default=60,
+                   help='Number of seconds a connection to memcached is held'
+                   ' unused in the pool before it is closed.'
+                   ' (keystone.cache.memcache_pool backend only).'),
+        cfg.IntOpt('memcache_pool_connection_get_timeout',
+                   default=10,
+                   help='Number of seconds that an operation will wait to get '
+                        'a memcache client connection.'),
+    ],
+    'ssl': [
+        cfg.StrOpt('ca_key',
+                   default='/etc/keystone/ssl/private/cakey.pem',
+                   help='Path of the CA key file for SSL.'),
+        cfg.IntOpt('key_size', default=1024,
+                   help='SSL key length (in bits) (auto generated '
+                        'certificate).'),
+        cfg.IntOpt('valid_days', default=3650,
+                   help='Days the certificate is valid for once signed '
+                        '(auto generated certificate).'),
+        cfg.StrOpt('cert_subject',
+                   default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost',
+                   help='SSL certificate subject (auto generated '
+                        'certificate).'),
+    ],
+    'signing': [
+        cfg.StrOpt('certfile',
+                   default=_CERTFILE,
+                   help='Path of the certfile for token signing. For '
+                        'non-production environments, you may be interested '
+                        'in using `keystone-manage pki_setup` to generate '
+                        'self-signed certificates.'),
+        cfg.StrOpt('keyfile',
+                   default=_KEYFILE,
+                   help='Path of the keyfile for token signing.'),
+        cfg.StrOpt('ca_certs',
+                   default='/etc/keystone/ssl/certs/ca.pem',
+                   help='Path of the CA for token signing.'),
+        cfg.StrOpt('ca_key',
+                   default='/etc/keystone/ssl/private/cakey.pem',
+                   help='Path of the CA key for token signing.'),
+        cfg.IntOpt('key_size', default=2048,
+                   help='Key size (in bits) for token signing cert '
+                        '(auto generated certificate).'),
+        cfg.IntOpt('valid_days', default=3650,
+                   help='Days the token signing cert is valid for '
+                        '(auto generated certificate).'),
+        cfg.StrOpt('cert_subject',
+                   default=('/C=US/ST=Unset/L=Unset/O=Unset/'
+                            'CN=www.example.com'),
+                   help='Certificate subject (auto generated certificate) for '
+                        'token signing.'),
+    ],
+    'assignment': [
+        # assignment has no default for backward compatibility reasons.
+        # If assignment driver is not specified, the identity driver chooses
+        # the backend
+        cfg.StrOpt('driver',
+                   help='Assignment backend driver.'),
+    ],
+    'resource': [
+        cfg.StrOpt('driver',
+                   help='Resource backend driver. If a resource driver is '
+                        'not specified, the assignment driver will choose '
+                        'the resource driver.'),
+        cfg.BoolOpt('caching', default=True,
+                    deprecated_opts=[cfg.DeprecatedOpt('caching',
+                                                       group='assignment')],
+                    help='Toggle for resource caching. This has no effect '
+                         'unless global caching is enabled.'),
+        cfg.IntOpt('cache_time',
+                   deprecated_opts=[cfg.DeprecatedOpt('cache_time',
+                                                      group='assignment')],
+                   help='TTL (in seconds) to cache resource data. This has '
+                        'no effect unless global caching is enabled.'),
+        cfg.IntOpt('list_limit',
+                   deprecated_opts=[cfg.DeprecatedOpt('list_limit',
+                                                      group='assignment')],
+                   help='Maximum number of entities that will be returned '
+                        'in a resource collection.'),
+    ],
+    'domain_config': [
+        cfg.StrOpt('driver',
+                   default='keystone.resource.config_backends.sql.'
+                           'DomainConfig',
+                   help='Domain config backend driver.'),
+    ],
+    'role': [
+        # The role driver has no default for backward compatibility reasons.
+        # If role driver is not specified, the assignment driver chooses
+        # the backend
+        cfg.StrOpt('driver',
+                   help='Role backend driver.'),
+        cfg.BoolOpt('caching', default=True,
+                    help='Toggle for role caching. This has no effect '
+                         'unless global caching is enabled.'),
+        cfg.IntOpt('cache_time',
+                   help='TTL (in seconds) to cache role data. This has '
+                        'no effect unless global caching is enabled.'),
+        cfg.IntOpt('list_limit',
+                   help='Maximum number of entities that will be returned '
+                        'in a role collection.'),
+    ],
+    'credential': [
+        cfg.StrOpt('driver',
+                   default=('keystone.credential.backends'
+                            '.sql.Credential'),
+                   help='Credential backend driver.'),
+    ],
+    'oauth1': [
+        cfg.StrOpt('driver',
+                   default='keystone.contrib.oauth1.backends.sql.OAuth1',
+                   help='OAuth backend driver.'),
+        cfg.IntOpt('request_token_duration', default=28800,
+                   help='Duration (in seconds) for the OAuth Request Token.'),
+        cfg.IntOpt('access_token_duration', default=86400,
+                   help='Duration (in seconds) for the OAuth Access Token.'),
+    ],
+    'federation': [
+        cfg.StrOpt('driver',
+                   default='keystone.contrib.federation.'
+                           'backends.sql.Federation',
+                   help='Federation backend driver.'),
+        cfg.StrOpt('assertion_prefix', default='',
+                   help='Value to be used when filtering assertion parameters '
+                        'from the environment.'),
+        cfg.StrOpt('remote_id_attribute',
+                   help='Value to be used to obtain the entity ID of the '
+                        'Identity Provider from the environment (e.g. if '
+                        'using the mod_shib plugin this value is '
+                        '`Shib-Identity-Provider`).'),
+        cfg.StrOpt('federated_domain_name', default='Federated',
+                   help='A domain name that is reserved to allow federated '
+                        'ephemeral users to have a domain concept. Note that '
+                        'an admin will not be able to create a domain with '
+                        'this name or update an existing domain to this '
+                        'name. Changing this value is not advised unless '
+                        'you really have to. Setting this option to an '
+                        'empty string or None has no effect; the default '
+                        'name will be used.'),
+        cfg.MultiStrOpt('trusted_dashboard', default=[],
+                        help='A list of trusted dashboard hosts. Before '
+                             'accepting a Single Sign-On request to return a '
+                             'token, the origin host must be a member of the '
+                             'trusted_dashboard list. This configuration '
+                             'option may be repeated for multiple values. '
+                             'For example: trusted_dashboard=http://acme.com '
+                             'trusted_dashboard=http://beta.com'),
+        cfg.StrOpt('sso_callback_template', default=_SSO_CALLBACK,
+                   help='Location of Single Sign-On callback handler, will '
+                        'return a token to a trusted dashboard host.'),
+    ],
+    'policy': [
+        cfg.StrOpt('driver',
+                   default='keystone.policy.backends.sql.Policy',
+                   help='Policy backend driver.'),
+        cfg.IntOpt('list_limit',
+                   help='Maximum number of entities that will be returned '
+                        'in a policy collection.'),
+    ],
+    'endpoint_filter': [
+        cfg.StrOpt('driver',
+                   default='keystone.contrib.endpoint_filter.backends'
+                           '.sql.EndpointFilter',
+                   help='Endpoint Filter backend driver.'),
+        cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True,
+                    help='Toggle to return all active endpoints if no filter '
+                         'exists.'),
+    ],
+    'endpoint_policy': [
+        cfg.StrOpt('driver',
+                   default='keystone.contrib.endpoint_policy.backends'
+                           '.sql.EndpointPolicy',
+                   help='Endpoint policy backend driver.'),
+    ],
+    'ldap': [
+        cfg.StrOpt('url', default='ldap://localhost',
+                   help='URL for connecting to the LDAP server.'),
+        cfg.StrOpt('user',
+                   help='User BindDN to query the LDAP server.'),
+        cfg.StrOpt('password', secret=True,
+                   help='Password for the BindDN to query the LDAP server.'),
+        cfg.StrOpt('suffix', default='cn=example,cn=com',
+                   help='LDAP server suffix.'),
+        cfg.BoolOpt('use_dumb_member', default=False,
+                    help='If true, will add a dummy member to groups. This is '
+                         'required if the objectclass for groups requires the '
+                         '"member" attribute.'),
+        cfg.StrOpt('dumb_member', default='cn=dumb,dc=nonexistent',
+                   help='DN of the "dummy member" to use when '
+                        '"use_dumb_member" is enabled.'),
+        cfg.BoolOpt('allow_subtree_delete', default=False,
+                    help='Delete subtrees using the subtree delete control. '
+                         'Only enable this option if your LDAP server '
+                         'supports subtree deletion.'),
+        cfg.StrOpt('query_scope', default='one',
+                   help='The LDAP scope for queries, this can be either '
+                        '"one" (onelevel/singleLevel) or "sub" '
+                        '(subtree/wholeSubtree).'),
+        cfg.IntOpt('page_size', default=0,
+                   help='Maximum results per page; a value of zero ("0") '
+                        'disables paging.'),
+        cfg.StrOpt('alias_dereferencing', default='default',
+                   help='The LDAP dereferencing option for queries. This '
+                        'can be either "never", "searching", "always", '
+                        '"finding" or "default". The "default" option falls '
+                        'back to using default dereferencing configured by '
+                        'your ldap.conf.'),
+        cfg.IntOpt('debug_level',
+                   help='Sets the LDAP debugging level for LDAP calls. '
+                        'A value of 0 means that debugging is not enabled. '
+                        'This value is a bitmask, consult your LDAP '
+                        'documentation for possible values.'),
+        cfg.BoolOpt('chase_referrals',
+                    help='Override the system\'s default referral chasing '
+                         'behavior for queries.'),
+        cfg.StrOpt('user_tree_dn',
+                   help='Search base for users.'),
+        cfg.StrOpt('user_filter',
+                   help='LDAP search filter for users.'),
+        cfg.StrOpt('user_objectclass', default='inetOrgPerson',
+                   help='LDAP objectclass for users.'),
+        cfg.StrOpt('user_id_attribute', default='cn',
+                   help='LDAP attribute mapped to user id. '
+                        'WARNING: must not be a multivalued attribute.'),
+        cfg.StrOpt('user_name_attribute', default='sn',
+                   help='LDAP attribute mapped to user name.'),
+        cfg.StrOpt('user_mail_attribute', default='mail',
+                   help='LDAP attribute mapped to user email.'),
+        cfg.StrOpt('user_pass_attribute', default='userPassword',
+                   help='LDAP attribute mapped to password.'),
+        cfg.StrOpt('user_enabled_attribute', default='enabled',
+                   help='LDAP attribute mapped to user enabled flag.'),
+        cfg.BoolOpt('user_enabled_invert', default=False,
+                    help='Invert the meaning of the boolean enabled values. '
+                         'Some LDAP servers use a boolean lock attribute '
+                         'where "true" means an account is disabled. Setting '
+                         '"user_enabled_invert = true" will allow these lock '
+                         'attributes to be used. This setting will have no '
+                         'effect if "user_enabled_mask" or '
+                         '"user_enabled_emulation" settings are in use.'),
+        cfg.IntOpt('user_enabled_mask', default=0,
+                   help='Bitmask integer to indicate the bit that the enabled '
+                        'value is stored in if the LDAP server represents '
+                        '"enabled" as a bit on an integer rather than a '
+                        'boolean. A value of "0" indicates the mask is not '
+                        'used. If this is not set to "0" the typical value '
+                        'is "2". This is typically used when '
+                        '"user_enabled_attribute = userAccountControl".'),
+        cfg.StrOpt('user_enabled_default', default='True',
+                   help='Default value to enable users. This should match an '
+                        'appropriate int value if the LDAP server uses '
+                        'non-boolean (bitmask) values to indicate if a user '
+                        'is enabled or disabled. If this is not set to "True" '
+                        'the typical value is "512". This is typically used '
+                        'when "user_enabled_attribute = userAccountControl".'),
+        cfg.ListOpt('user_attribute_ignore',
+                    default=['default_project_id', 'tenants'],
+                    help='List of attributes stripped off the user on '
+                         'update.'),
+        cfg.StrOpt('user_default_project_id_attribute',
+                   help='LDAP attribute mapped to default_project_id for '
+                        'users.'),
+        cfg.BoolOpt('user_allow_create', default=True,
+                    help='Allow user creation in LDAP backend.'),
+        cfg.BoolOpt('user_allow_update', default=True,
+                    help='Allow user updates in LDAP backend.'),
+        cfg.BoolOpt('user_allow_delete', default=True,
+                    help='Allow user deletion in LDAP backend.'),
+        cfg.BoolOpt('user_enabled_emulation', default=False,
+                    help='If true, Keystone uses an alternative method to '
+                         'determine if a user is enabled or not by checking '
+                         'if they are a member of the '
+                         '"user_enabled_emulation_dn" group.'),
+        cfg.StrOpt('user_enabled_emulation_dn',
+                   help='DN of the group entry to hold enabled users when '
+                        'using enabled emulation.'),
+        cfg.ListOpt('user_additional_attribute_mapping',
+                    default=[],
+                    help='List of additional LDAP attributes mapped onto '
+                         'user attributes. The mapping '
+                         'format is <ldap_attr>:<user_attr>, where '
+                         'ldap_attr is the attribute in the LDAP entry and '
+                         'user_attr is the Identity API attribute.'),
+
+        cfg.StrOpt('project_tree_dn',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_tree_dn', group='ldap')],
+                   help='Search base for projects.'),
+        cfg.StrOpt('project_filter',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_filter', group='ldap')],
+                   help='LDAP search filter for projects.'),
+        cfg.StrOpt('project_objectclass', default='groupOfNames',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_objectclass', group='ldap')],
+                   help='LDAP objectclass for projects.'),
+        cfg.StrOpt('project_id_attribute', default='cn',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_id_attribute', group='ldap')],
+                   help='LDAP attribute mapped to project id.'),
+        cfg.StrOpt('project_member_attribute', default='member',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_member_attribute', group='ldap')],
+                   help='LDAP attribute mapped to project membership for '
+                        'user.'),
+        cfg.StrOpt('project_name_attribute', default='ou',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_name_attribute', group='ldap')],
+                   help='LDAP attribute mapped to project name.'),
+        cfg.StrOpt('project_desc_attribute', default='description',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_desc_attribute', group='ldap')],
+                   help='LDAP attribute mapped to project description.'),
+        cfg.StrOpt('project_enabled_attribute', default='enabled',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_enabled_attribute', group='ldap')],
+                   help='LDAP attribute mapped to project enabled.'),
+        cfg.StrOpt('project_domain_id_attribute',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_domain_id_attribute', group='ldap')],
+                   default='businessCategory',
+                   help='LDAP attribute mapped to project domain_id.'),
+        cfg.ListOpt('project_attribute_ignore', default=[],
+                    deprecated_opts=[cfg.DeprecatedOpt(
+                        'tenant_attribute_ignore', group='ldap')],
+                    help='List of attributes stripped off the project on '
+                         'update.'),
+        cfg.BoolOpt('project_allow_create', default=True,
+                    deprecated_opts=[cfg.DeprecatedOpt(
+                        'tenant_allow_create', group='ldap')],
+                    help='Allow project creation in LDAP backend.'),
+        cfg.BoolOpt('project_allow_update', default=True,
+                    deprecated_opts=[cfg.DeprecatedOpt(
+                        'tenant_allow_update', group='ldap')],
+                    help='Allow project update in LDAP backend.'),
+        cfg.BoolOpt('project_allow_delete', default=True,
+                    deprecated_opts=[cfg.DeprecatedOpt(
+                        'tenant_allow_delete', group='ldap')],
+                    help='Allow project deletion in LDAP backend.'),
+        cfg.BoolOpt('project_enabled_emulation', default=False,
+                    deprecated_opts=[cfg.DeprecatedOpt(
+                        'tenant_enabled_emulation', group='ldap')],
+                    help='If true, Keystone uses an alternative method to '
+                         'determine if a project is enabled or not by '
+                         'checking if it is a member of the '
+                         '"project_enabled_emulation_dn" group.'),
+        cfg.StrOpt('project_enabled_emulation_dn',
+                   deprecated_opts=[cfg.DeprecatedOpt(
+                       'tenant_enabled_emulation_dn', group='ldap')],
+                   help='DN of the group entry to hold enabled projects when '
+                        'using enabled emulation.'),
+        cfg.ListOpt('project_additional_attribute_mapping',
+                    deprecated_opts=[cfg.DeprecatedOpt(
+                        'tenant_additional_attribute_mapping', group='ldap')],
+                    default=[],
+                    help='Additional attribute mappings for projects. '
+                         'Attribute mapping format is '
+                         '<ldap_attr>:<user_attr>, where ldap_attr is the '
+                         'attribute in the LDAP entry and user_attr is the '
+                         'Identity API attribute.'),
+
+        cfg.StrOpt('role_tree_dn',
+                   help='Search base for roles.'),
+        cfg.StrOpt('role_filter',
+                   help='LDAP search filter for roles.'),
+        cfg.StrOpt('role_objectclass', default='organizationalRole',
+                   help='LDAP objectclass for roles.'),
+        cfg.StrOpt('role_id_attribute', default='cn',
+                   help='LDAP attribute mapped to role id.'),
+        cfg.StrOpt('role_name_attribute', default='ou',
+                   help='LDAP attribute mapped to role name.'),
+        cfg.StrOpt('role_member_attribute', default='roleOccupant',
+                   help='LDAP attribute mapped to role membership.'),
+        cfg.ListOpt('role_attribute_ignore', default=[],
+                    help='List of attributes stripped off the role on '
+                         'update.'),
+        cfg.BoolOpt('role_allow_create', default=True,
+                    help='Allow role creation in LDAP backend.'),
+        cfg.BoolOpt('role_allow_update', default=True,
+                    help='Allow role update in LDAP backend.'),
+        cfg.BoolOpt('role_allow_delete', default=True,
+                    help='Allow role deletion in LDAP backend.'),
+        cfg.ListOpt('role_additional_attribute_mapping',
+                    default=[],
+                    help='Additional attribute mappings for roles. Attribute '
+                         'mapping format is <ldap_attr>:<user_attr>, where '
+                         'ldap_attr is the attribute in the LDAP entry and '
+                         'user_attr is the Identity API attribute.'),
+
+        cfg.StrOpt('group_tree_dn',
+                   help='Search base for groups.'),
+        cfg.StrOpt('group_filter',
+                   help='LDAP search filter for groups.'),
+        cfg.StrOpt('group_objectclass', default='groupOfNames',
+                   help='LDAP objectclass for groups.'),
+        cfg.StrOpt('group_id_attribute', default='cn',
+                   help='LDAP attribute mapped to group id.'),
+        cfg.StrOpt('group_name_attribute', default='ou',
+                   help='LDAP attribute mapped to group name.'),
+        cfg.StrOpt('group_member_attribute', default='member',
+                   help='LDAP attribute mapped to show group membership.'),
+        cfg.StrOpt('group_desc_attribute', default='description',
+                   help='LDAP attribute mapped to group description.'),
+        cfg.ListOpt('group_attribute_ignore', default=[],
+                    help='List of attributes stripped off the group on '
+                         'update.'),
+        cfg.BoolOpt('group_allow_create', default=True,
+                    help='Allow group creation in LDAP backend.'),
+        cfg.BoolOpt('group_allow_update', default=True,
+                    help='Allow group update in LDAP backend.'),
+        cfg.BoolOpt('group_allow_delete', default=True,
+                    help='Allow group deletion in LDAP backend.'),
+        cfg.ListOpt('group_additional_attribute_mapping',
+                    default=[],
+                    help='Additional attribute mappings for groups. Attribute '
+                         'mapping format is <ldap_attr>:<user_attr>, where '
+                         'ldap_attr is the attribute in the LDAP entry and '
+                         'user_attr is the Identity API attribute.'),
+
+        cfg.StrOpt('tls_cacertfile',
+                   help='CA certificate file path for communicating with '
+                        'LDAP servers.'),
+        cfg.StrOpt('tls_cacertdir',
+                   help='CA certificate directory path for communicating with '
+                        'LDAP servers.'),
+        cfg.BoolOpt('use_tls', default=False,
+                    help='Enable TLS for communicating with LDAP servers.'),
+        cfg.StrOpt('tls_req_cert', default='demand',
+                   help='Valid options for tls_req_cert are demand, never, '
+                        'and allow.'),
+        cfg.BoolOpt('use_pool', default=False,
+                    help='Enable LDAP connection pooling.'),
+        cfg.IntOpt('pool_size', default=10,
+                   help='Connection pool size.'),
+        cfg.IntOpt('pool_retry_max', default=3,
+                   help='Maximum count of reconnect trials.'),
+        cfg.FloatOpt('pool_retry_delay', default=0.1,
+                     help='Time span in seconds to wait between two '
+                          'reconnect trials.'),
+        cfg.IntOpt('pool_connection_timeout', default=-1,
+                   help='Connector timeout in seconds. Value -1 indicates '
+                        'indefinite wait for response.'),
+        cfg.IntOpt('pool_connection_lifetime', default=600,
+                   help='Connection lifetime in seconds.'),
+        cfg.BoolOpt('use_auth_pool', default=False,
+                    help='Enable LDAP connection pooling for end user '
+                         'authentication. If use_pool is disabled, then this '
+                         'setting is meaningless and is not used at all.'),
+        cfg.IntOpt('auth_pool_size', default=100,
+                   help='End user auth connection pool size.'),
+        cfg.IntOpt('auth_pool_connection_lifetime', default=60,
+                   help='End user auth connection lifetime in seconds.'),
+    ],
+    'auth': [
+        cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS,
+                    help='Default auth methods.'),
+        cfg.StrOpt('password',
+                   default='keystone.auth.plugins.password.Password',
+                   help='The password auth plugin module.'),
+        cfg.StrOpt('token',
+                   default='keystone.auth.plugins.token.Token',
+                   help='The token auth plugin module.'),
+        # deals with REMOTE_USER authentication
+        cfg.StrOpt('external',
+                   default='keystone.auth.plugins.external.DefaultDomain',
+                   help='The external (REMOTE_USER) auth plugin module.'),
+        cfg.StrOpt('oauth1',
+                   default='keystone.auth.plugins.oauth1.OAuth',
+                   help='The oAuth1.0 auth plugin module.'),
+    ],
+    'paste_deploy': [
+        cfg.StrOpt('config_file', default='keystone-paste.ini',
+                   help='Name of the paste configuration file that defines '
+                        'the available pipelines.'),
+    ],
+    'memcache': [
+        cfg.ListOpt('servers', default=['localhost:11211'],
+                    help='Memcache servers in the format of "host:port".'),
+        cfg.IntOpt('dead_retry',
+                   default=5 * 60,
+                   help='Number of seconds a memcached server is considered '
+                        'dead before it is tried again. This is used by the '
+                        'key value store system (e.g. token '
+                        'pooled memcached persistence backend).'),
+        cfg.IntOpt('socket_timeout',
+                   default=3,
+                   help='Timeout in seconds for every call to a server. This '
+                        'is used by the key value store system (e.g. token '
+                        'pooled memcached persistence backend).'),
+        cfg.IntOpt('pool_maxsize',
+                   default=10,
+                   help='Max total number of open connections to every'
+                        ' memcached server. This is used by the key value '
+                        'store system (e.g. token pooled memcached '
+                        'persistence backend).'),
+        cfg.IntOpt('pool_unused_timeout',
+                   default=60,
+                   help='Number of seconds a connection to memcached is held'
+                        ' unused in the pool before it is closed. This is used'
+                        ' by the key value store system (e.g. token pooled '
+                        'memcached persistence backend).'),
+        cfg.IntOpt('pool_connection_get_timeout',
+                   default=10,
+                   help='Number of seconds that an operation will wait to get '
+                        'a memcache client connection. This is used by the '
+                        'key value store system (e.g. token pooled memcached '
+                        'persistence backend).'),
+    ],
+    'catalog': [
+        cfg.StrOpt('template_file',
+                   default='default_catalog.templates',
+                   help='Catalog template file name for use with the '
+                        'template catalog backend.'),
+        cfg.StrOpt('driver',
+                   default='keystone.catalog.backends.sql.Catalog',
+                   help='Catalog backend driver.'),
+        cfg.BoolOpt('caching', default=True,
+                    help='Toggle for catalog caching. This has no '
+                         'effect unless global caching is enabled.'),
+        cfg.IntOpt('cache_time',
+                   help='Time to cache catalog data (in seconds). This has no '
+                        'effect unless global and catalog caching are '
+                        'enabled.'),
+        cfg.IntOpt('list_limit',
+                   help='Maximum number of entities that will be returned '
+                        'in a catalog collection.'),
+    ],
+    'kvs': [
+        cfg.ListOpt('backends', default=[],
+                    help='Extra dogpile.cache backend modules to register '
+                         'with the dogpile.cache library.'),
+        cfg.StrOpt('config_prefix', default='keystone.kvs',
+                   help='Prefix for building the configuration dictionary '
+                        'for the KVS region. This should not need to be '
+                        'changed unless there is another dogpile.cache '
+                        'region with the same configuration name.'),
+        cfg.BoolOpt('enable_key_mangler', default=True,
+                    help='Toggle to disable using a key-mangling function '
+                         'to ensure fixed-length keys. This is toggleable '
+                         'for debugging purposes; it is highly recommended '
+                         'to always leave this set to true.'),
+        cfg.IntOpt('default_lock_timeout', default=5,
+                   help='Default lock timeout (in seconds) for distributed '
+                        'locking.'),
+    ],
+    'saml': [
+        cfg.IntOpt('assertion_expiration_time', default=3600,
+                   help='Default TTL, in seconds, for any generated SAML '
+                        'assertion created by Keystone.'),
+        cfg.StrOpt('xmlsec1_binary',
+                   default='xmlsec1',
+                   help='Binary to be called for XML signing. Install the '
+                        'appropriate package, specify absolute path or adjust '
+                        'your PATH environment variable if the binary cannot '
+                        'be found.'),
+        cfg.StrOpt('certfile',
+                   default=_CERTFILE,
+                   help='Path of the certfile for SAML signing. For '
+                        'non-production environments, you may be interested '
+                        'in using `keystone-manage pki_setup` to generate '
+                        'self-signed certificates. Note, the path cannot '
+                        'contain a comma.'),
+        cfg.StrOpt('keyfile',
+                   default=_KEYFILE,
+                   help='Path of the keyfile for SAML signing. Note, the path '
+                        'cannot contain a comma.'),
+        cfg.StrOpt('idp_entity_id',
+                   help='Entity ID value for unique Identity Provider '
+                        'identification. Usually an FQDN with a suffix is '
+                        'used. A value is required to generate IDP Metadata. '
+                        'For example: https://keystone.example.com/v3/'
+                        'OS-FEDERATION/saml2/idp'),
+        cfg.StrOpt('idp_sso_endpoint',
+                   help='Identity Provider Single-Sign-On service value, '
+                        'required in the Identity Provider\'s metadata. '
+                        'A value is required to generate IDP Metadata. '
+                        'For example: https://keystone.example.com/v3/'
+                        'OS-FEDERATION/saml2/sso'),
+        cfg.StrOpt('idp_lang', default='en',
+                   help='Language used by the organization.'),
+        cfg.StrOpt('idp_organization_name',
+                   help='Organization name the installation belongs to.'),
+        cfg.StrOpt('idp_organization_display_name',
+                   help='Organization name to be displayed.'),
+        cfg.StrOpt('idp_organization_url',
+                   help='URL of the organization.'),
+        cfg.StrOpt('idp_contact_company',
+                   help='Company of contact person.'),
+        cfg.StrOpt('idp_contact_name',
+                   help='Given name of contact person.'),
+        cfg.StrOpt('idp_contact_surname',
+                   help='Surname of contact person.'),
+        cfg.StrOpt('idp_contact_email',
+                   help='Email address of contact person.'),
+        cfg.StrOpt('idp_contact_telephone',
+                   help='Telephone number of contact person.'),
+        cfg.StrOpt('idp_contact_type', default='other',
+                   help='Contact type. Allowed values are: '
+                        'technical, support, administrative, '
+                        'billing, and other.'),
+        cfg.StrOpt('idp_metadata_path',
+                   default='/etc/keystone/saml2_idp_metadata.xml',
+                   help='Path to the Identity Provider Metadata file. '
+                        'This file should be generated with the '
+                        'keystone-manage saml_idp_metadata command.'),
+    ],
+    'eventlet_server': [
+        cfg.IntOpt('public_workers',
+                   deprecated_name='public_workers',
+                   deprecated_group='DEFAULT',
+                   help='The number of worker processes to serve the public '
+                        'eventlet application. Defaults to number of CPUs '
+                        '(minimum of 2).'),
+        cfg.IntOpt('admin_workers',
+                   deprecated_name='admin_workers',
+                   deprecated_group='DEFAULT',
+                   help='The number of worker processes to serve the admin '
+                        'eventlet application. Defaults to number of CPUs '
+                        '(minimum of 2).'),
+        cfg.StrOpt('public_bind_host',
+                   default='0.0.0.0',
+                   deprecated_opts=[cfg.DeprecatedOpt('bind_host',
+                                                      group='DEFAULT'),
+                                    cfg.DeprecatedOpt('public_bind_host',
+                                                      group='DEFAULT'), ],
+                   help='The IP address of the network interface for the '
+                        'public service to listen on.'),
+        cfg.IntOpt('public_port', default=5000, deprecated_name='public_port',
+                   deprecated_group='DEFAULT',
+                   help='The port number which the public service listens '
+                        'on.'),
+        cfg.StrOpt('admin_bind_host',
+                   default='0.0.0.0',
+                   deprecated_opts=[cfg.DeprecatedOpt('bind_host',
+                                                      group='DEFAULT'),
+                                    cfg.DeprecatedOpt('admin_bind_host',
+                                                      group='DEFAULT')],
+                   help='The IP address of the network interface for the '
+                        'admin service to listen on.'),
+        cfg.IntOpt('admin_port', default=35357, deprecated_name='admin_port',
+                   deprecated_group='DEFAULT',
+                   help='The port number which the admin service listens '
+                        'on.'),
+        cfg.BoolOpt('tcp_keepalive', default=False,
+                    deprecated_name='tcp_keepalive',
+                    deprecated_group='DEFAULT',
+                    help='Set this to true if you want to enable '
+                         'TCP_KEEPALIVE on server sockets, i.e. sockets used '
+                         'by the Keystone wsgi server for client '
+                         'connections.'),
+        cfg.IntOpt('tcp_keepidle',
+                   default=600,
+                   deprecated_name='tcp_keepidle',
+                   deprecated_group='DEFAULT',
+                   help='Sets the value of TCP_KEEPIDLE in seconds for each '
+                        'server socket. Only applies if tcp_keepalive is '
+                        'true.'),
+    ],
+    'eventlet_server_ssl': [
+        cfg.BoolOpt('enable', default=False, deprecated_name='enable',
+                    deprecated_group='ssl',
+                    help='Toggle for SSL support on the Keystone '
+                         'eventlet servers.'),
+        cfg.StrOpt('certfile',
+                   default="/etc/keystone/ssl/certs/keystone.pem",
+                   deprecated_name='certfile', deprecated_group='ssl',
+                   help='Path of the certfile for SSL. For non-production '
+                        'environments, you may be interested in using '
+                        '`keystone-manage ssl_setup` to generate self-signed '
+                        'certificates.'),
+        cfg.StrOpt('keyfile',
+                   default='/etc/keystone/ssl/private/keystonekey.pem',
+                   deprecated_name='keyfile', deprecated_group='ssl',
+                   help='Path of the keyfile for SSL.'),
+        cfg.StrOpt('ca_certs',
+                   default='/etc/keystone/ssl/certs/ca.pem',
+                   deprecated_name='ca_certs', deprecated_group='ssl',
+                   help='Path of the CA cert file for SSL.'),
+        cfg.BoolOpt('cert_required', default=False,
+                    deprecated_name='cert_required', deprecated_group='ssl',
+                    help='Require client certificate.'),
+    ],
+}
+
+
+CONF = cfg.CONF
+oslo_messaging.set_transport_defaults(control_exchange='keystone')
+
+
+def _register_auth_plugin_opt(conf, option):
+    conf.register_opt(option, group='auth')
+
+
+def setup_authentication(conf=None):
+    # register any non-default auth methods here (used by extensions, etc)
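+    # As an illustration (the method name is hypothetical): with
+    # "methods = external,password,token,kerberos" in the [auth] section,
+    # the loop below registers a StrOpt named "kerberos" so that a plugin
+    # module can be configured for it.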
+    if conf is None:
+        conf = CONF
+    for method_name in conf.auth.methods:
+        if method_name not in _DEFAULT_AUTH_METHODS:
+            option = cfg.StrOpt(method_name)
+            _register_auth_plugin_opt(conf, option)
+
+
+def configure(conf=None):
+    if conf is None:
+        conf = CONF
+
+    conf.register_cli_opt(
+        cfg.BoolOpt('standard-threads', default=False,
+                    help='Do not monkey-patch threading system modules.'))
+    conf.register_cli_opt(
+        cfg.StrOpt('pydev-debug-host',
+                   help='Host to connect to for remote debugger.'))
+    conf.register_cli_opt(
+        cfg.IntOpt('pydev-debug-port',
+                   help='Port to connect to for remote debugger.'))
+
+    for section in FILE_OPTIONS:
+        for option in FILE_OPTIONS[section]:
+            if section:
+                conf.register_opt(option, group=section)
+            else:
+                conf.register_opt(option)
+
+    # register any non-default auth methods here (used by extensions, etc)
+    setup_authentication(conf)
+
+
+def list_opts():
+    """Return a list of oslo_config options available in Keystone.
+
+    The returned list includes all oslo_config options which are registered as
+    the "FILE_OPTIONS" in keystone.common.config. This list will not include
+    the options from the oslo-incubator library or any options registered
+    dynamically at run time.
+
+    Each object in the list is a two element tuple. The first element of
+    each tuple is the name of the group under which the list of options in the
+    second element will be registered. A group name of None corresponds to the
+    [DEFAULT] group in config files.
+
+    This function is also discoverable via the 'oslo_config.opts' entry point
+    under the 'keystone.config.opts' namespace.
+
+    The purpose of this is to allow tools like the Oslo sample config file
+    generator to discover the options exposed to users by this library.
+
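+    A minimal consumption sketch (illustrative)::
+
+        for group_name, opts in list_opts():
+            print(group_name or 'DEFAULT', [opt.name for opt in opts])
+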
+    :returns: a list of (group_name, opts) tuples
+    """
+    return FILE_OPTIONS.items()
diff --git a/keystone-moon/keystone/common/controller.py b/keystone-moon/keystone/common/controller.py
new file mode 100644 (file)
index 0000000..bd26b7c
--- /dev/null
@@ -0,0 +1,800 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import functools
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import authorization
+from keystone.common import dependency
+from keystone.common import driver_hints
+from keystone.common import utils
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone.models import token_model
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+def v2_deprecated(f):
+    """No-op decorator in preparation for deprecating Identity API v2.
+
+    This is a placeholder for the pending deprecation of v2. The implementation
+    of this decorator can be replaced with::
+
+        from keystone.openstack.common import versionutils
+
+
+        v2_deprecated = versionutils.deprecated(
+            what='v2 API',
+            as_of=versionutils.deprecated.JUNO,
+            in_favor_of='v3 API')
+
+    """
+    return f
+
+
+def _build_policy_check_credentials(self, action, context, kwargs):
+    LOG.debug('RBAC: Authorizing %(action)s(%(kwargs)s)', {
+        'action': action,
+        'kwargs': ', '.join(['%s=%s' % (k, kwargs[k]) for k in kwargs])})
+
+    # see if auth context has already been created. If so use it.
+    if ('environment' in context and
+            authorization.AUTH_CONTEXT_ENV in context['environment']):
+        LOG.debug('RBAC: using auth context from the request environment')
+        return context['environment'].get(authorization.AUTH_CONTEXT_ENV)
+
+    # There is no current auth context, build it from the incoming token.
+    # TODO(morganfainberg): Collapse this logic with AuthContextMiddleware
+    # in a sane manner as this just mirrors the logic in AuthContextMiddleware
+    try:
+        LOG.debug('RBAC: building auth context from the incoming auth token')
+        token_ref = token_model.KeystoneToken(
+            token_id=context['token_id'],
+            token_data=self.token_provider_api.validate_token(
+                context['token_id']))
+        # NOTE(jamielennox): whilst this maybe shouldn't be within this
+        # function it would otherwise need to reload the token_ref from
+        # backing store.
+        wsgi.validate_token_bind(context, token_ref)
+    except exception.TokenNotFound:
+        LOG.warning(_LW('RBAC: Invalid token'))
+        raise exception.Unauthorized()
+
+    auth_context = authorization.token_to_auth_context(token_ref)
+
+    return auth_context
+
+
+def protected(callback=None):
+    """Wraps API calls with role based access controls (RBAC).
+
+    This handles both the protection of the API parameters as well as any
+    target entities for single-entity API calls.
+
+    More complex API calls (for example, those that deal with several
+    different entities) should pass in a callback function, which will
+    subsequently be called to check protection for these multiple entities.
+    This callback function should gather the appropriate entities needed and
+    then call check_protection() in the V3Controller class.
+
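+    A usage sketch (the controller class and method are illustrative, not
+    part of this module)::
+
+        class UserV3(V3Controller):
+            @protected()
+            def get_user(self, context, user_id):
+                ...
+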
+    """
+    def wrapper(f):
+        @functools.wraps(f)
+        def inner(self, context, *args, **kwargs):
+            if 'is_admin' in context and context['is_admin']:
+                LOG.warning(_LW('RBAC: Bypassing authorization'))
+            elif callback is not None:
+                prep_info = {'f_name': f.__name__,
+                             'input_attr': kwargs}
+                callback(self, context, prep_info, *args, **kwargs)
+            else:
+                action = 'identity:%s' % f.__name__
+                creds = _build_policy_check_credentials(self, action,
+                                                        context, kwargs)
+
+                policy_dict = {}
+
+                # Check to see if we need to include the target entity in our
+                # policy checks.  We deduce this by seeing if the class has
+                # specified a get_member_from_driver() method and that
+                # kwargs contains the appropriate entity id.
+                if (hasattr(self, 'get_member_from_driver') and
+                        self.get_member_from_driver is not None):
+                    key = '%s_id' % self.member_name
+                    if key in kwargs:
+                        ref = self.get_member_from_driver(kwargs[key])
+                        policy_dict['target'] = {self.member_name: ref}
+
+                # TODO(henry-nash): Move this entire code to a member
+                # method inside v3 Auth
+                if context.get('subject_token_id') is not None:
+                    token_ref = token_model.KeystoneToken(
+                        token_id=context['subject_token_id'],
+                        token_data=self.token_provider_api.validate_token(
+                            context['subject_token_id']))
+                    policy_dict.setdefault('target', {})
+                    policy_dict['target'].setdefault(self.member_name, {})
+                    policy_dict['target'][self.member_name]['user_id'] = (
+                        token_ref.user_id)
+                    try:
+                        user_domain_id = token_ref.user_domain_id
+                    except exception.UnexpectedError:
+                        user_domain_id = None
+                    if user_domain_id:
+                        policy_dict['target'][self.member_name].setdefault(
+                            'user', {})
+                        policy_dict['target'][self.member_name][
+                            'user'].setdefault('domain', {})
+                        policy_dict['target'][self.member_name]['user'][
+                            'domain']['id'] = (
+                                user_domain_id)
+
+                # Add in the kwargs, which means that any entity provided as a
+                # parameter for calls like create and update will be included.
+                policy_dict.update(kwargs)
+                self.policy_api.enforce(creds,
+                                        action,
+                                        utils.flatten_dict(policy_dict))
+                LOG.debug('RBAC: Authorization granted')
+            return f(self, context, *args, **kwargs)
+        return inner
+    return wrapper
+
+
+def filterprotected(*filters):
+    """Wraps filtered API calls with role based access controls (RBAC)."""
+
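+    # A usage sketch (controller method and filter names are illustrative):
+    #
+    #     @filterprotected('domain_id', 'enabled')
+    #     def list_users(self, context, filters):
+    #         ...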
+    def _filterprotected(f):
+        @functools.wraps(f)
+        def wrapper(self, context, **kwargs):
+            if not context['is_admin']:
+                action = 'identity:%s' % f.__name__
+                creds = _build_policy_check_credentials(self, action,
+                                                        context, kwargs)
+                # Now, build the target dict for policy check.  We include:
+                #
+                # - Any query filter parameters
+                # - Data from the main url (which will be in the kwargs
+                #   parameter) and would typically include the prime key
+                #   of a get/update/delete call
+                #
+                # First, any query filter parameters
+                target = dict()
+                if filters:
+                    for item in filters:
+                        if item in context['query_string']:
+                            target[item] = context['query_string'][item]
+
+                    LOG.debug('RBAC: Adding query filter params (%s)', (
+                        ', '.join(['%s=%s' % (item, target[item])
+                                  for item in target])))
+
+                # Now any formal url parameters
+                for key in kwargs:
+                    target[key] = kwargs[key]
+
+                self.policy_api.enforce(creds,
+                                        action,
+                                        utils.flatten_dict(target))
+
+                LOG.debug('RBAC: Authorization granted')
+            else:
+                LOG.warning(_LW('RBAC: Bypassing authorization'))
+            return f(self, context, filters, **kwargs)
+        return wrapper
+    return _filterprotected
+
+
+class V2Controller(wsgi.Application):
+    """Base controller class for Identity API v2."""
+    def _normalize_domain_id(self, context, ref):
+        """Fill in domain_id since v2 calls are not domain-aware.
+
+        This will overwrite any domain_id that was inadvertently
+        specified in the v2 call.
+
+        """
+        ref['domain_id'] = CONF.identity.default_domain_id
+        return ref
+
+    @staticmethod
+    def filter_domain_id(ref):
+        """Remove domain_id since v2 calls are not domain-aware."""
+        ref.pop('domain_id', None)
+        return ref
+
+    @staticmethod
+    def filter_domain(ref):
+        """Remove domain since v2 calls are not domain-aware.
+
+        V3 Fernet tokens build users with a domain in the token data. This
+        method ensures that users created in v3 belong to the default
+        domain.
+
+        """
+        if 'domain' in ref:
+            if ref['domain'].get('id') != CONF.identity.default_domain_id:
+                raise exception.Unauthorized(
+                    _('Non-default domain is not supported'))
+            del ref['domain']
+        return ref
+
+    @staticmethod
+    def normalize_username_in_response(ref):
+        """Adds username to outgoing user refs to match the v2 spec.
+
+        Internally we use `name` to represent a user's name. The v2 spec
+        requires the use of `username` instead.
+
+        """
+        if 'username' not in ref and 'name' in ref:
+            ref['username'] = ref['name']
+        return ref
+
+    @staticmethod
+    def normalize_username_in_request(ref):
+        """Adds name in incoming user refs to match the v2 spec.
+
+        Internally we use `name` to represent a user's name. The v2 spec
+        requires the use of `username` instead.
+
+        """
+        if 'name' not in ref and 'username' in ref:
+            ref['name'] = ref.pop('username')
+        return ref
+
+    @staticmethod
+    def v3_to_v2_user(ref):
+        """Convert a user_ref from v3 to v2 compatible.
+
+        * v2.0 users are not domain aware, and should have domain_id removed
+        * v2.0 users expect the use of tenantId instead of default_project_id
+        * v2.0 users have a username attribute
+
+        This method should only be applied to user_refs being returned from the
+        v2.0 controller(s).
+
+        If ref is a list type, we will iterate through each element and do the
+        conversion.
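+
+        An illustrative transformation (values are made up)::
+
+            {'id': 'u1', 'name': 'demo', 'domain_id': 'default',
+             'default_project_id': 'p1'}
+
+        becomes::
+
+            {'id': 'u1', 'name': 'demo', 'username': 'demo',
+             'tenantId': 'p1'}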
+        """
+
+        def _format_default_project_id(ref):
+            """Convert default_project_id to tenantId for v2 calls."""
+            default_project_id = ref.pop('default_project_id', None)
+            if default_project_id is not None:
+                ref['tenantId'] = default_project_id
+            elif 'tenantId' in ref:
+                # NOTE(morganfainberg): To avoid v2.0 confusion if somehow a
+                # tenantId property sneaks its way into the extra blob on the
+                # user, we remove it here.  If default_project_id is set, we
+                # would override it in either case.
+                del ref['tenantId']
+
+        def _normalize_and_filter_user_properties(ref):
+            """Run through the various filter/normalization methods."""
+            _format_default_project_id(ref)
+            V2Controller.filter_domain(ref)
+            V2Controller.filter_domain_id(ref)
+            V2Controller.normalize_username_in_response(ref)
+            return ref
+
+        if isinstance(ref, dict):
+            return _normalize_and_filter_user_properties(ref)
+        elif isinstance(ref, list):
+            return [_normalize_and_filter_user_properties(x) for x in ref]
+        else:
+            raise ValueError(_('Expected dict or list: %s') % type(ref))
+
+    def format_project_list(self, tenant_refs, **kwargs):
+        """Format a v2 style project list, including marker/limits."""
+        marker = kwargs.get('marker')
+        first_index = 0
+        if marker is not None:
+            for (marker_index, tenant) in enumerate(tenant_refs):
+                if tenant['id'] == marker:
+                    # we start pagination after the marker
+                    first_index = marker_index + 1
+                    break
+            else:
+                msg = _('Marker could not be found')
+                raise exception.ValidationError(message=msg)
+
+        limit = kwargs.get('limit')
+        last_index = None
+        if limit is not None:
+            try:
+                limit = int(limit)
+                if limit < 0:
+                    raise AssertionError()
+            except (ValueError, AssertionError):
+                msg = _('Invalid limit value')
+                raise exception.ValidationError(message=msg)
+            last_index = first_index + limit
+
+        tenant_refs = tenant_refs[first_index:last_index]
+
+        for x in tenant_refs:
+            if 'enabled' not in x:
+                x['enabled'] = True
+        o = {'tenants': tenant_refs,
+             'tenants_links': []}
+        return o
+
+
+@dependency.requires('policy_api', 'token_provider_api')
+class V3Controller(wsgi.Application):
+    """Base controller class for Identity API v3.
+
+    Child classes should set the ``collection_name`` and ``member_name`` class
+    attributes, representing the collection of entities they are exposing to
+    the API. This is required for supporting self-referential links,
+    pagination, etc.
+
+    Class parameters:
+
+    * `_mutable_parameters` - set of parameters that can be changed by users.
+                              Usually used by cls.check_immutable_params()
+    * `_public_parameters` - set of parameters that are exposed to the user.
+                             Usually used by cls.filter_params()
+
+    """
+
+    collection_name = 'entities'
+    member_name = 'entity'
+    get_member_from_driver = None
+
+    @classmethod
+    def base_url(cls, context, path=None):
+        endpoint = super(V3Controller, cls).base_url(context, 'public')
+        if not path:
+            path = cls.collection_name
+
+        return '%s/%s/%s' % (endpoint, 'v3', path.lstrip('/'))
+
+    def get_auth_context(self, context):
+        # TODO(dolphm): this method of accessing the auth context is terrible,
+        # but context needs to be refactored to always have reasonable values.
+        env_context = context.get('environment', {})
+        return env_context.get(authorization.AUTH_CONTEXT_ENV, {})
+
+    @classmethod
+    def full_url(cls, context, path=None):
+        url = cls.base_url(context, path)
+        if context['environment'].get('QUERY_STRING'):
+            url = '%s?%s' % (url, context['environment']['QUERY_STRING'])
+
+        return url
+
+    @classmethod
+    def query_filter_is_true(cls, filter_value):
+        """Determine if bool query param is 'True'.
+
+        We treat this the same way as we do for policy
+        enforcement:
+
+        {bool_param}=0 is treated as False
+
+        Any other value is considered to be equivalent to
+        True, including the absence of a value
+
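+        For example, ``?enabled=0`` evaluates to False, while ``?enabled=1``,
+        ``?enabled=true`` and a bare ``?enabled`` all evaluate to True.
+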
+        """
+
+        if (isinstance(filter_value, six.string_types) and
+                filter_value == '0'):
+            val = False
+        else:
+            val = True
+        return val
+
+    @classmethod
+    def _add_self_referential_link(cls, context, ref):
+        ref.setdefault('links', {})
+        ref['links']['self'] = cls.base_url(context) + '/' + ref['id']
+
+    @classmethod
+    def wrap_member(cls, context, ref):
+        cls._add_self_referential_link(context, ref)
+        return {cls.member_name: ref}
+
+    @classmethod
+    def wrap_collection(cls, context, refs, hints=None):
+        """Wrap a collection, checking for filtering and pagination.
+
+        Returns the wrapped collection, which includes:
+        - executing any filtering not already carried out
+        - truncating to a set limit if necessary
+        - adding 'self' links to every member
+        - adding 'next', 'self' and 'previous' links for the whole
+          collection.
+
+        :param context: the current context, containing the original url path
+                        and query string
+        :param refs: the list of members of the collection
+        :param hints: list hints, containing any relevant filters and limit.
+                      Any filters already satisfied by managers will have been
+                      removed
+        """
+        # Check if there are any filters in hints that were not
+        # handled by the drivers. The driver will not have paginated or
+        # limited the output if it found there were filters it was unable to
+        # handle.
+
+        if hints is not None:
+            refs = cls.filter_by_attributes(refs, hints)
+
+        list_limited, refs = cls.limit(refs, hints)
+
+        for ref in refs:
+            cls.wrap_member(context, ref)
+
+        container = {cls.collection_name: refs}
+        container['links'] = {
+            'next': None,
+            'self': cls.full_url(context, path=context['path']),
+            'previous': None}
+
+        if list_limited:
+            container['truncated'] = True
+
+        return container
+
+    @classmethod
+    def limit(cls, refs, hints):
+        """Limits a list of entities.
+
+        The underlying driver layer may have already truncated the collection
+        for us, but in case it was unable to handle truncation we check here.
+
+        :param refs: the list of members of the collection
+        :param hints: hints, containing, among other things, the limit
+                      requested
+
+        :returns: boolean indicating whether the list was truncated, as well
+                  as the list of (truncated if necessary) entities.
+
+        """
+        NOT_LIMITED = False
+        LIMITED = True
+
+        if hints is None or hints.limit is None:
+            # No truncation was requested
+            return NOT_LIMITED, refs
+
+        if hints.limit.get('truncated', False):
+            # The driver did truncate the list
+            return LIMITED, refs
+
+        if len(refs) > hints.limit['limit']:
+            # The driver layer wasn't able to truncate it for us, so we must
+            # do it here
+            return LIMITED, refs[:hints.limit['limit']]
+
+        return NOT_LIMITED, refs
+
+    @classmethod
+    def filter_by_attributes(cls, refs, hints):
+        """Filters a list of references by filter values."""
+
+        def _attr_match(ref_attr, val_attr):
+            """Matches attributes allowing for booleans as strings.
+
+            We test explicitly for a value that defines it as 'False',
+            which also means that the existence of the attribute with
+            no value implies 'True'.
+
+            """
+            if type(ref_attr) is bool:
+                return ref_attr == utils.attr_as_boolean(val_attr)
+            else:
+                return ref_attr == val_attr
+
+        def _inexact_attr_match(filter, ref):
+            """Applies an inexact filter to a result dict.
+
+            :param filter: the filter in question
+            :param ref: the dict to check
+
+            :returns: True if there is a match
+
+            """
+            comparator = filter['comparator']
+            key = filter['name']
+
+            if key in ref:
+                filter_value = filter['value']
+                target_value = ref[key]
+                if not filter['case_sensitive']:
+                    # We only support inexact filters on strings so
+                    # it's OK to use lower()
+                    filter_value = filter_value.lower()
+                    target_value = target_value.lower()
+
+                if comparator == 'contains':
+                    return (filter_value in target_value)
+                elif comparator == 'startswith':
+                    return target_value.startswith(filter_value)
+                elif comparator == 'endswith':
+                    return target_value.endswith(filter_value)
+                else:
+                    # We silently ignore unsupported filters
+                    return True
+
+            return False
+
+        for filter in hints.filters:
+            if filter['comparator'] == 'equals':
+                attr = filter['name']
+                value = filter['value']
+                refs = [r for r in refs if _attr_match(
+                    utils.flatten_dict(r).get(attr), value)]
+            else:
+                # It might be an inexact filter
+                refs = [r for r in refs if _inexact_attr_match(
+                    filter, r)]
+
+        return refs
+
+    @classmethod
+    def build_driver_hints(cls, context, supported_filters):
+        """Build list hints based on the context query string.
+
+        :param context: contains the query_string from which any list hints can
+                        be extracted.
+        :param supported_filters: list of filters supported, so ignore any
+                                  keys in query_dict that are not in this list.
+
+        """
+        query_dict = context['query_string']
+        hints = driver_hints.Hints()
+
+        if query_dict is None:
+            return hints
+
+        for key in query_dict:
+            # Check if this is an exact filter
+            if supported_filters is None or key in supported_filters:
+                hints.add_filter(key, query_dict[key])
+                continue
+
+            # Check if it is an inexact filter
+            for valid_key in supported_filters:
+                # See if this entry in query_dict matches a known key with an
+                # inexact suffix added.  If it doesn't match, then that just
+                # means that there is no inexact filter for that key in this
+                # query.
+                if not key.startswith(valid_key + '__'):
+                    continue
+
+                base_key, comparator = key.split('__', 1)
+
+                # We map the query-style inexact of, for example:
+                #
+                # {'email__contains', 'myISP'}
+                #
+                # into a list directive add filter call parameters of:
+                #
+                # name = 'email'
+                # value = 'myISP'
+                # comparator = 'contains'
+                # case_sensitive = True
+
+                case_sensitive = True
+                if comparator.startswith('i'):
+                    case_sensitive = False
+                    comparator = comparator[1:]
+                hints.add_filter(base_key, query_dict[key],
+                                 comparator=comparator,
+                                 case_sensitive=case_sensitive)
+
+        # NOTE(henry-nash): If we were to support pagination, we would pull any
+        # pagination directives out of the query_dict here, and add them into
+        # the hints list.
+        return hints
+
+    def _require_matching_id(self, value, ref):
+        """Ensures the value matches the reference's ID, if any."""
+        if 'id' in ref and ref['id'] != value:
+            raise exception.ValidationError('Cannot change ID')
+
+    def _require_matching_domain_id(self, ref_id, ref, get_member):
+        """Ensure the current domain ID matches the reference one, if any.
+
+        Provided we want domain IDs to be immutable, check whether any
+        domain_id specified in the ref dictionary matches the existing
+        domain_id for this entity.
+
+        :param ref_id: the ID of the entity
+        :param ref: the dictionary of new values proposed for this entity
+        :param get_member: The member function to call to get the current
+                           entity
+        :raises: :class:`keystone.exception.ValidationError`
+
+        """
+        # TODO(henry-nash): It might be safer and more efficient to do this
+        # check in the managers affected, so look to migrate this check to
+        # there in the future.
+        if CONF.domain_id_immutable and 'domain_id' in ref:
+            existing_ref = get_member(ref_id)
+            if ref['domain_id'] != existing_ref['domain_id']:
+                raise exception.ValidationError(_('Cannot change Domain ID'))
+
+    def _assign_unique_id(self, ref):
+        """Generates and assigns a unique identifier to a reference."""
+        ref = ref.copy()
+        ref['id'] = uuid.uuid4().hex
+        return ref
+
+    def _get_domain_id_for_list_request(self, context):
+        """Get the domain_id for a v3 list call.
+
+        If we are running with multiple domain drivers, then the caller must
+        specify a domain_id either as a filter or as part of the token scope.
+
+        """
+        if not CONF.identity.domain_specific_drivers_enabled:
+            # We don't need to specify a domain ID in this case
+            return
+
+        if context['query_string'].get('domain_id') is not None:
+            return context['query_string'].get('domain_id')
+
+        try:
+            token_ref = token_model.KeystoneToken(
+                token_id=context['token_id'],
+                token_data=self.token_provider_api.validate_token(
+                    context['token_id']))
+        except KeyError:
+            raise exception.ValidationError(
+                _('domain_id is required as part of entity'))
+        except (exception.TokenNotFound,
+                exception.UnsupportedTokenVersionException):
+            LOG.warning(_LW('Invalid token found while getting domain ID '
+                            'for list request'))
+            raise exception.Unauthorized()
+
+        if token_ref.domain_scoped:
+            return token_ref.domain_id
+        else:
+            LOG.warning(
+                _LW('No domain information specified as part of list request'))
+            raise exception.Unauthorized()
+
+    def _get_domain_id_from_token(self, context):
+        """Get the domain_id for a v3 create call.
+
+        In the case of a v3 create entity call that does not specify a domain
+        ID, the spec says that we should use the domain scoping from the token
+        being used.
+
+        """
+        # We could make this more efficient by loading the domain_id
+        # into the context in the wrapper function above (since
+        # this version of normalize_domain will only be called inside
+        # a v3 protected call).  However, this optimization is probably not
+        # worth the duplication of state.
+        try:
+            token_ref = token_model.KeystoneToken(
+                token_id=context['token_id'],
+                token_data=self.token_provider_api.validate_token(
+                    context['token_id']))
+        except KeyError:
+            # This might happen if we use the Admin token, for instance
+            raise exception.ValidationError(
+                _('A domain-scoped token must be used'))
+        except (exception.TokenNotFound,
+                exception.UnsupportedTokenVersionException):
+            LOG.warning(_LW('Invalid token found while getting domain ID '
+                            'for list request'))
+            raise exception.Unauthorized()
+
+        if token_ref.domain_scoped:
+            return token_ref.domain_id
+        else:
+            # TODO(henry-nash): We should issue an exception here since if
+            # a v3 call does not explicitly specify the domain_id in the
+            # entity, it should be using a domain scoped token.  However,
+            # the current tempest heat tests issue a v3 call without this.
+            # This is raised as bug #1283539.  Once this is fixed, we
+            # should remove the line below and replace it with an error.
+            return CONF.identity.default_domain_id
+
+    def _normalize_domain_id(self, context, ref):
+        """Fill in domain_id if not specified in a v3 call."""
+        if 'domain_id' not in ref:
+            ref['domain_id'] = self._get_domain_id_from_token(context)
+        return ref
+
+    @staticmethod
+    def filter_domain_id(ref):
+        """Override v2 filter to let domain_id out for v3 calls."""
+        return ref
+
+    def check_protection(self, context, prep_info, target_attr=None):
+        """Provide call protection for complex target attributes.
+
+        As well as including the standard parameters from the original API
+        call (which is passed in prep_info), this call will add in any
+        additional entities or attributes (passed in target_attr), so that
+        they can be referenced by policy rules.
+
+         """
+        if 'is_admin' in context and context['is_admin']:
+            LOG.warning(_LW('RBAC: Bypassing authorization'))
+        else:
+            action = 'identity:%s' % prep_info['f_name']
+            # TODO(henry-nash) need to log the target attributes as well
+            creds = _build_policy_check_credentials(self, action,
+                                                    context,
+                                                    prep_info['input_attr'])
+            # Build the dict the policy engine will check against from both the
+            # parameters passed into the call we are protecting (which was
+            # stored in the prep_info by protected()), plus the target
+            # attributes provided.
+            policy_dict = {}
+            if target_attr:
+                policy_dict = {'target': target_attr}
+            policy_dict.update(prep_info['input_attr'])
+            self.policy_api.enforce(creds,
+                                    action,
+                                    utils.flatten_dict(policy_dict))
+            LOG.debug('RBAC: Authorization granted')
+
+    @classmethod
+    def check_immutable_params(cls, ref):
+        """Raise exception when disallowed parameter is in ref.
+
+        Check whether the ref dictionary representing a request has only
+        mutable parameters included. If not, raise an exception. This method
+        checks only root-level keys from a ref dictionary.
+
+        :param ref: a dictionary representing deserialized request to be
+                    stored
+        :raises: :class:`keystone.exception.ImmutableAttributeError`
+
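+        As an illustration (attribute names are made up): if
+        cls._mutable_parameters were {'name'}, a ref of
+        {'name': 'x', 'id': 'y'} would raise, because 'id' is not mutable.
+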
+        """
+        ref_keys = set(ref.keys())
+        blocked_keys = ref_keys.difference(cls._mutable_parameters)
+
+        if not blocked_keys:
+            # No immutable parameters changed
+            return
+
+        exception_args = {'target': cls.__name__,
+                          'attributes': ', '.join(blocked_keys)}
+        raise exception.ImmutableAttributeError(**exception_args)
+
+    @classmethod
+    def filter_params(cls, ref):
+        """Remove unspecified parameters from the dictionary.
+
+        This method removes any parameters not listed in
+        cls._public_parameters from the dictionary. See
+        check_immutable_params for the corresponding method that raises an
+        exception instead. This method checks only root-level keys from a
+        ref dictionary.
+
+        :param ref: a dictionary representing deserialized response to be
+                    serialized
+        """
+        ref_keys = set(ref.keys())
+        blocked_keys = ref_keys - cls._public_parameters
+        for blocked_param in blocked_keys:
+            del ref[blocked_param]
+        return ref
diff --git a/keystone-moon/keystone/common/dependency.py b/keystone-moon/keystone/common/dependency.py
new file mode 100644 (file)
index 0000000..14a68f1
--- /dev/null
@@ -0,0 +1,311 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This module provides support for dependency injection.
+
+Providers are registered via the ``@provider()`` decorator, and dependencies on
+them are registered with ``@requires()`` or ``@optional()``. Providers are
+available to their consumers via an attribute. See the documentation for the
+individual functions for more detail.
+
+See also:
+
+    https://en.wikipedia.org/wiki/Dependency_injection
+
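+A minimal usage sketch (the names here are illustrative; see the decorator
+docstrings below for details)::
+
+    @dependency.provider('foo_api')
+    class FooManager(object):
+        pass
+
+    @dependency.requires('foo_api')
+    class Consumer(object):
+        pass
+
+    FooManager()        # registers itself as the 'foo_api' provider
+    Consumer().foo_api  # resolved to the FooManager instance
+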
+"""
+
+import traceback
+
+import six
+
+from keystone.i18n import _
+from keystone import notifications
+
+
+_REGISTRY = {}
+
+_future_dependencies = {}
+_future_optionals = {}
+_factories = {}
+
+
+def _set_provider(name, provider):
+    _original_provider, where_registered = _REGISTRY.get(name, (None, None))
+    if where_registered:
+        raise Exception('%s already has a registered provider, at\n%s' %
+                        (name, ''.join(where_registered)))
+    _REGISTRY[name] = (provider, traceback.format_stack())
+
+
+GET_REQUIRED = object()
+GET_OPTIONAL = object()
+
+
+def get_provider(name, optional=GET_REQUIRED):
+    if optional is GET_REQUIRED:
+        return _REGISTRY[name][0]
+    return _REGISTRY.get(name, (None, None))[0]
+
+
+class UnresolvableDependencyException(Exception):
+    """Raised when a required dependency is not resolvable.
+
+    See ``resolve_future_dependencies()`` for more details.
+
+    """
+    def __init__(self, name, targets):
+        msg = _('Unregistered dependency: %(name)s for %(targets)s') % {
+            'name': name, 'targets': targets}
+        super(UnresolvableDependencyException, self).__init__(msg)
+
+
+def provider(name):
+    """A class decorator used to register providers.
+
+    When ``@provider()`` is used to decorate a class, instances of that class
+    will register themselves as providers for the named dependency. For
+    example, in the code fragment::
+
+        @dependency.provider('foo_api')
+        class Foo:
+            def __init__(self):
+                ...
+
+            ...
+
+        foo = Foo()
+
+    The object ``foo`` will be registered as a provider for ``foo_api``. No
+    more than one such instance should be created; additional instances will
+    replace the previous ones, possibly resulting in different instances being
+    used by different consumers.
+
+    """
+    def wrapper(cls):
+        def wrapped(init):
+            def register_event_callbacks(self):
+                # NOTE(morganfainberg): A provider that has an implicit
+                # dependency on other providers may utilize the event callback
+                # mechanism to react to any changes in those providers. This
+                # is performed as part of the ``@provider()`` mechanism so
+                # that we can ensure that the callback is only ever called
+                # once and is guaranteed to be on the properly configured and
+                # instantiated backend.
+                if not hasattr(self, 'event_callbacks'):
+                    return
+
+                if not isinstance(self.event_callbacks, dict):
+                    msg = _('event_callbacks must be a dict')
+                    raise ValueError(msg)
+
+                for event in self.event_callbacks:
+                    if not isinstance(self.event_callbacks[event], dict):
+                        msg = _('event_callbacks[%s] must be a dict') % event
+                        raise ValueError(msg)
+                    for resource_type in self.event_callbacks[event]:
+                        # Make sure we register the provider for each event it
+                        # cares to call back.
+                        callbacks = self.event_callbacks[event][resource_type]
+                        if not callbacks:
+                            continue
+                        if not hasattr(callbacks, '__iter__'):
+                            # ensure the callback information is a list
+                            # allowing multiple callbacks to exist
+                            callbacks = [callbacks]
+                        notifications.register_event_callback(event,
+                                                              resource_type,
+                                                              callbacks)
+
+            def __wrapped_init__(self, *args, **kwargs):
+                """Initialize the wrapped object and add it to the registry."""
+                init(self, *args, **kwargs)
+                _set_provider(name, self)
+                register_event_callbacks(self)
+
+                resolve_future_dependencies(__provider_name=name)
+
+            return __wrapped_init__
+
+        cls.__init__ = wrapped(cls.__init__)
+        _factories[name] = cls
+        return cls
+    return wrapper
+
+
+def _process_dependencies(obj):
+    # Any dependencies that can be resolved immediately are resolved.
+    # Dependencies that cannot be resolved immediately are stored for
+    # resolution in resolve_future_dependencies.
+
+    def process(obj, attr_name, unresolved_in_out):
+        for dependency in getattr(obj, attr_name, []):
+            if dependency not in _REGISTRY:
+                # We don't know about this dependency, so save it for later.
+                unresolved_in_out.setdefault(dependency, []).append(obj)
+                continue
+
+            setattr(obj, dependency, get_provider(dependency))
+
+    process(obj, '_dependencies', _future_dependencies)
+    process(obj, '_optionals', _future_optionals)
+
+
+def requires(*dependencies):
+    """A class decorator used to inject providers into consumers.
+
+    The required providers will be made available to instances of the decorated
+    class via an attribute with the same name as the provider. For example, in
+    the code fragment::
+
+        @dependency.requires('foo_api', 'bar_api')
+        class FooBarClient:
+            def __init__(self):
+                ...
+
+            ...
+
+        client = FooBarClient()
+
+    The object ``client`` will have attributes named ``foo_api`` and
+    ``bar_api``, which are instances of the named providers.
+
+    Objects must not rely on the existence of these attributes until after
+    ``resolve_future_dependencies()`` has been called; they may not exist
+    beforehand.
+
+    Dependencies registered via ``@requires()`` must have providers; if not,
+    an ``UnresolvableDependencyException`` will be raised when
+    ``resolve_future_dependencies()`` is called.
+
+    """
+    def wrapper(self, *args, **kwargs):
+        """Inject each dependency from the registry."""
+        self.__wrapped_init__(*args, **kwargs)
+        _process_dependencies(self)
+
+    def wrapped(cls):
+        """Note the required dependencies on the object for later injection.
+
+        The dependencies of the parent class are combined with those of the
+        child class to create a new set of dependencies.
+
+        """
+        existing_dependencies = getattr(cls, '_dependencies', set())
+        cls._dependencies = existing_dependencies.union(dependencies)
+        if not hasattr(cls, '__wrapped_init__'):
+            cls.__wrapped_init__ = cls.__init__
+            cls.__init__ = wrapper
+        return cls
+
+    return wrapped
+
+
+def optional(*dependencies):
+    """Similar to ``@requires()``, except that the dependencies are optional.
+
+    If no provider is available, the attributes will be set to ``None``.
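+
+    For example (``baz_api`` is an illustrative provider name)::
+
+        @dependency.optional('baz_api')
+        class Consumer(object):
+            def baz_enabled(self):
+                return self.baz_api is not None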
+
+    """
+    def wrapper(self, *args, **kwargs):
+        """Inject each dependency from the registry."""
+        self.__wrapped_init__(*args, **kwargs)
+        _process_dependencies(self)
+
+    def wrapped(cls):
+        """Note the optional dependencies on the object for later injection.
+
+        The dependencies of the parent class are combined with those of the
+        child class to create a new set of dependencies.
+
+        """
+        existing_optionals = getattr(cls, '_optionals', set())
+        cls._optionals = existing_optionals.union(dependencies)
+        if not hasattr(cls, '__wrapped_init__'):
+            cls.__wrapped_init__ = cls.__init__
+            cls.__init__ = wrapper
+        return cls
+
+    return wrapped
+
+
+def resolve_future_dependencies(__provider_name=None):
+    """Forces injection of all dependencies.
+
+    Before this function is called, circular dependencies may not have been
+    injected. This function should be called only once, after all global
+    providers are registered. If an object needs to be created after this
+    call, it must not have circular dependencies.
+
+    If any required dependencies are unresolvable, this function will raise an
+    ``UnresolvableDependencyException``.
+
+    Outside of this module, this function should be called with no arguments;
+    the optional argument ``__provider_name`` is used internally and should
+    be treated as an implementation detail.
+
+    """
+    new_providers = dict()
+    if __provider_name:
+        # A provider was registered, so take care of any objects depending on
+        # it.
+        targets = _future_dependencies.pop(__provider_name, [])
+        targets.extend(_future_optionals.pop(__provider_name, []))
+
+        for target in targets:
+            setattr(target, __provider_name, get_provider(__provider_name))
+
+        return
+
+    # Resolve optional dependencies; set the attribute to None if no
+    # provider is registered.
+    for dependency, targets in six.iteritems(_future_optionals.copy()):
+        provider = get_provider(dependency, optional=GET_OPTIONAL)
+        if provider is None:
+            factory = _factories.get(dependency)
+            if factory:
+                provider = factory()
+                new_providers[dependency] = provider
+        for target in targets:
+            setattr(target, dependency, provider)
+
+    # Resolve future dependencies; raise UnresolvableDependencyException if
+    # no provider is registered.
+    try:
+        for dependency, targets in six.iteritems(_future_dependencies.copy()):
+            if dependency not in _REGISTRY:
+                # A class that could fulfill the dependency was registered,
+                # but it has not yet been initialized.
+                factory = _factories.get(dependency)
+                if factory:
+                    provider = factory()
+                    new_providers[dependency] = provider
+                else:
+                    raise UnresolvableDependencyException(dependency, targets)
+
+            for target in targets:
+                setattr(target, dependency, get_provider(dependency))
+    finally:
+        _future_dependencies.clear()
+    return new_providers
+
+
+def reset():
+    """Reset the registry of providers.
+
+    This is useful for unit testing to ensure that tests don't use providers
+    from previous tests.
+    """
+
+    _REGISTRY.clear()
+    _future_dependencies.clear()
+    _future_optionals.clear()
diff --git a/keystone-moon/keystone/common/driver_hints.py b/keystone-moon/keystone/common/driver_hints.py
new file mode 100644 (file)
index 0000000..0361e31
--- /dev/null
@@ -0,0 +1,65 @@
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class Hints(object):
+    """Encapsulate driver hints for listing entities.
+
+    Hints are modifiers that affect the return of entities from a
+    list_<entities> operation.  They are typically passed to a driver to give
+    direction as to what filtering, pagination or list limiting actions are
+    being requested.
+
+    It is optional for a driver to act on some or all of the list hints,
+    but any filter that it does satisfy must be marked as such by
+    removing that filter from the list.
+
+    A Hints object contains ``filters``, a publicly accessible list of
+    dicts, and ``limit``, a dict indicating how many entries the listing
+    should be limited to.
+
+    Each filter term consists of:
+
+    * ``name``: the name of the attribute being matched
+    * ``value``: the value against which it is being matched
+    * ``comparator``: the operation, which can be one of ``equals``,
+                      ``startswith`` or ``endswith``
+    * ``case_sensitive``: whether any comparison should take account of
+                          case
+    * ``type``: will always be 'filter'
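+
+    For example (a short illustrative sketch)::
+
+        hints = Hints()
+        hints.add_filter('name', 'fred')
+        hints.set_limit(100)
+        # A driver that satisfies a filter removes it from the list:
+        hints.filters.remove(hints.get_exact_filter_by_name('name'))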
+
+    """
+    def __init__(self):
+        self.limit = None
+        self.filters = list()
+
+    def add_filter(self, name, value, comparator='equals',
+                   case_sensitive=False):
+        """Adds a filter to the filters list, which is publicly accessible."""
+        self.filters.append({'name': name, 'value': value,
+                             'comparator': comparator,
+                             'case_sensitive': case_sensitive,
+                             'type': 'filter'})
+
+    def get_exact_filter_by_name(self, name):
+        """Return a filter key and value if exact filter exists for name."""
+        for entry in self.filters:
+            if (entry['type'] == 'filter' and entry['name'] == name and
+                    entry['comparator'] == 'equals'):
+                return entry
+
+    def set_limit(self, limit, truncated=False):
+        """Set a limit to indicate the list should be truncated."""
+        self.limit = {'limit': limit, 'type': 'limit', 'truncated': truncated}
diff --git a/keystone-moon/keystone/common/environment/__init__.py b/keystone-moon/keystone/common/environment/__init__.py
new file mode 100644 (file)
index 0000000..da1de89
--- /dev/null
@@ -0,0 +1,100 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import functools
+import os
+
+from oslo_log import log
+
+LOG = log.getLogger(__name__)
+
+
+__all__ = ['Server', 'httplib', 'subprocess']
+
+_configured = False
+
+Server = None
+httplib = None
+subprocess = None
+
+
+def configure_once(name):
+    """Ensure that environment configuration is only run once.
+
+    If the environment is reconfigured in the same way, the call is ignored.
+    It is an error to attempt to reconfigure the environment in a
+    different way.
+    """
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            global _configured
+            if _configured:
+                if _configured == name:
+                    return
+                else:
+                    raise SystemError("Environment has already been "
+                                      "configured as %s" % _configured)
+
+            LOG.debug("Environment configured as: %s", name)
+            _configured = name
+            return func(*args, **kwargs)
+
+        return wrapper
+    return decorator
+
+
+@configure_once('eventlet')
+def use_eventlet(monkeypatch_thread=None):
+    global httplib, subprocess, Server
+
+    # This must be set before the initial import of eventlet because if
+    # dnspython is present in your environment then eventlet monkeypatches
+    # socket.getaddrinfo() with an implementation which doesn't work for IPv6.
+    os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
+
+    import eventlet
+    from eventlet.green import httplib as _httplib
+    from eventlet.green import subprocess as _subprocess
+
+    from keystone.common.environment import eventlet_server
+
+    if monkeypatch_thread is None:
+        monkeypatch_thread = not os.getenv('STANDARD_THREADS')
+
+    # Raise the default from 8192 to accommodate large tokens
+    eventlet.wsgi.MAX_HEADER_LINE = 16384
+
+    # NOTE(ldbragst): Explicitly declare what should be monkey patched and
+    # what shouldn't. Doing this allows for more readable code when
+    # understanding Eventlet in Keystone. The following is a complete list
+    # of what is monkey patched instead of passing all=False and then passing
+    # module=True to monkey patch a specific module.
+    eventlet.patcher.monkey_patch(os=False, select=True, socket=True,
+                                  thread=monkeypatch_thread, time=True,
+                                  psycopg=False, MySQLdb=False)
+
+    Server = eventlet_server.Server
+    httplib = _httplib
+    subprocess = _subprocess
+
+
+@configure_once('stdlib')
+def use_stdlib():
+    global httplib, subprocess
+
+    import httplib as _httplib
+    import subprocess as _subprocess
+
+    httplib = _httplib
+    subprocess = _subprocess
diff --git a/keystone-moon/keystone/common/environment/eventlet_server.py b/keystone-moon/keystone/common/environment/eventlet_server.py
new file mode 100644 (file)
index 0000000..639e074
--- /dev/null
@@ -0,0 +1,194 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import errno
+import re
+import socket
+import ssl
+import sys
+
+import eventlet
+import eventlet.wsgi
+import greenlet
+from oslo_log import log
+from oslo_log import loggers
+
+from keystone.i18n import _LE, _LI
+
+
+LOG = log.getLogger(__name__)
+
+# Size of the pool used to spawn a single green thread in which a WSGI
+# server is then started. A size of one is enough: when several workers are
+# used, the parent process forks and each child gets a copy of the pool,
+# which contains no greenthread objects because the spawn happens after
+# the fork.
+POOL_SIZE = 1
+
+
+class EventletFilteringLogger(loggers.WritableLogger):
+    # NOTE(morganfainberg): This logger is designed to filter out specific
+    # tracebacks to limit the amount of data that eventlet can log. In the
+    # case of broken sockets (EPIPE and ECONNRESET), a huge volume of data
+    # can be written to the logs at roughly 14+ lines per traceback. The
+    # tracebacks in these cases are, at best, of limited debugging value.
+    def __init__(self, *args, **kwargs):
+        super(EventletFilteringLogger, self).__init__(*args, **kwargs)
+        self.regex = re.compile(r'errno (%d|%d)' %
+                                (errno.EPIPE, errno.ECONNRESET), re.IGNORECASE)
+
+    def write(self, msg):
+        m = self.regex.search(msg)
+        if m:
+            self.logger.log(log.logging.DEBUG, 'Error(%s) writing to socket.',
+                            m.group(1))
+        else:
+            self.logger.log(self.level, msg.rstrip())
+
+
+class Server(object):
+    """Server class to manage multiple WSGI sockets and applications."""
+
+    def __init__(self, application, host=None, port=None, keepalive=False,
+                 keepidle=None):
+        self.application = application
+        self.host = host or '0.0.0.0'
+        self.port = port or 0
+        # Pool for a green thread in which wsgi server will be running
+        self.pool = eventlet.GreenPool(POOL_SIZE)
+        self.socket_info = {}
+        self.greenthread = None
+        self.do_ssl = False
+        self.cert_required = False
+        self.keepalive = keepalive
+        self.keepidle = keepidle
+        self.socket = None
+
+    def listen(self, key=None, backlog=128):
+        """Create and start listening on socket.
+
+        Call before forking worker processes.
+
+        Raises Exception if this has already been called.
+        """
+
+        # TODO(dims): eventlet's green dns/socket module does not actually
+        # support IPv6 in getaddrinfo(). We need to get around this in the
+        # future or monitor upstream for a fix.
+        # Please refer below link
+        # (https://bitbucket.org/eventlet/eventlet/
+        # src/e0f578180d7d82d2ed3d8a96d520103503c524ec/eventlet/support/
+        # greendns.py?at=0.12#cl-163)
+        info = socket.getaddrinfo(self.host,
+                                  self.port,
+                                  socket.AF_UNSPEC,
+                                  socket.SOCK_STREAM)[0]
+
+        try:
+            self.socket = eventlet.listen(info[-1], family=info[0],
+                                          backlog=backlog)
+        except EnvironmentError:
+            LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
+                      {'host': self.host, 'port': self.port})
+            raise
+
+        LOG.info(_LI('Starting %(arg0)s on %(host)s:%(port)s'),
+                 {'arg0': sys.argv[0],
+                  'host': self.host,
+                  'port': self.port})
+
+    def start(self, key=None, backlog=128):
+        """Run a WSGI server with the given application."""
+
+        if self.socket is None:
+            self.listen(key=key, backlog=backlog)
+
+        dup_socket = self.socket.dup()
+        if key:
+            self.socket_info[key] = self.socket.getsockname()
+        # SSL is enabled
+        if self.do_ssl:
+            if self.cert_required:
+                cert_reqs = ssl.CERT_REQUIRED
+            else:
+                cert_reqs = ssl.CERT_NONE
+
+            dup_socket = eventlet.wrap_ssl(dup_socket, certfile=self.certfile,
+                                           keyfile=self.keyfile,
+                                           server_side=True,
+                                           cert_reqs=cert_reqs,
+                                           ca_certs=self.ca_certs)
+
+        # Optionally enable keepalive on the wsgi socket.
+        if self.keepalive:
+            dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+
+            if self.keepidle is not None:
+                dup_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
+                                      self.keepidle)
+
+        self.greenthread = self.pool.spawn(self._run,
+                                           self.application,
+                                           dup_socket)
+
+    def set_ssl(self, certfile, keyfile=None, ca_certs=None,
+                cert_required=True):
+        self.certfile = certfile
+        self.keyfile = keyfile
+        self.ca_certs = ca_certs
+        self.cert_required = cert_required
+        self.do_ssl = True
+
+    def stop(self):
+        if self.greenthread is not None:
+            self.greenthread.kill()
+
+    def wait(self):
+        """Wait until all servers have completed running."""
+        try:
+            self.pool.waitall()
+        except KeyboardInterrupt:
+            pass
+        except greenlet.GreenletExit:
+            pass
+
+    def reset(self):
+        """Required by the service interface.
+
+        The service interface is used by the launcher when receiving a
+        SIGHUP. The service interface is defined in
+        keystone.openstack.common.service.Service.
+
+        Keystone does not need to do anything here.
+        """
+        pass
+
+    def _run(self, application, socket):
+        """Start a WSGI server with a new green thread pool."""
+        logger = log.getLogger('eventlet.wsgi.server')
+        try:
+            eventlet.wsgi.server(socket, application,
+                                 log=EventletFilteringLogger(logger),
+                                 debug=False)
+        except greenlet.GreenletExit:
+            # Wait until all servers have completed running
+            pass
+        except Exception:
+            LOG.exception(_LE('Server error'))
+            raise
diff --git a/keystone-moon/keystone/common/extension.py b/keystone-moon/keystone/common/extension.py
new file mode 100644 (file)
index 0000000..b2ea80b
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+ADMIN_EXTENSIONS = {}
+PUBLIC_EXTENSIONS = {}
+
+
+def register_admin_extension(url_prefix, extension_data):
+    """Register extension with collection of admin extensions.
+
+    Extensions register their information here; it will show up on the
+    /extensions page as a way to indicate that the extension is active.
+
+    url_prefix: unique key for the extension that will appear in the
+                URLs generated by the extension.
+
+    extension_data is a dictionary.  The expected fields are:
+        'name':  short, human readable name of the extension
+        'namespace':  xml namespace
+        'alias':  identifier for the extension
+        'updated':  date the extension was last updated
+        'description':  text description of the extension
+        'links':  hyperlinks to documents describing the extension
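+
+    For example (all values below are illustrative only)::
+
+        register_admin_extension(
+            'OS-EXAMPLE',
+            {'name': 'Example Extension',
+             'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                          'OS-EXAMPLE/v1.0',
+             'alias': 'OS-EXAMPLE',
+             'updated': '2015-06-30T12:00:00-00:00',
+             'description': 'An example extension.',
+             'links': []})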
+
+    """
+    ADMIN_EXTENSIONS[url_prefix] = extension_data
+
+
+def register_public_extension(url_prefix, extension_data):
+    """Same as register_admin_extension but for public extensions."""
+
+    PUBLIC_EXTENSIONS[url_prefix] = extension_data
diff --git a/keystone-moon/keystone/common/json_home.py b/keystone-moon/keystone/common/json_home.py
new file mode 100644 (file)
index 0000000..215d596
--- /dev/null
@@ -0,0 +1,76 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import six
+
+
+def build_v3_resource_relation(resource_name):
+    return ('http://docs.openstack.org/api/openstack-identity/3/rel/%s' %
+            resource_name)
+
+
+def build_v3_extension_resource_relation(extension_name, extension_version,
+                                         resource_name):
+    return (
+        'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel/%s' %
+        (extension_name, extension_version, resource_name))
+
+
+def build_v3_parameter_relation(parameter_name):
+    return ('http://docs.openstack.org/api/openstack-identity/3/param/%s' %
+            parameter_name)
+
+
+def build_v3_extension_parameter_relation(extension_name, extension_version,
+                                          parameter_name):
+    return (
+        'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/param/'
+        '%s' % (extension_name, extension_version, parameter_name))
+
+
+class Parameters(object):
+    """Relationships for Common parameters."""
+
+    DOMAIN_ID = build_v3_parameter_relation('domain_id')
+    ENDPOINT_ID = build_v3_parameter_relation('endpoint_id')
+    GROUP_ID = build_v3_parameter_relation('group_id')
+    POLICY_ID = build_v3_parameter_relation('policy_id')
+    PROJECT_ID = build_v3_parameter_relation('project_id')
+    REGION_ID = build_v3_parameter_relation('region_id')
+    ROLE_ID = build_v3_parameter_relation('role_id')
+    SERVICE_ID = build_v3_parameter_relation('service_id')
+    USER_ID = build_v3_parameter_relation('user_id')
+
+
+class Status(object):
+    """Status values supported."""
+
+    DEPRECATED = 'deprecated'
+    EXPERIMENTAL = 'experimental'
+    STABLE = 'stable'
+
+    @classmethod
+    def is_supported(cls, status):
+        return status in [cls.DEPRECATED, cls.EXPERIMENTAL, cls.STABLE]
+
+
+def translate_urls(json_home, new_prefix):
+    """Given a JSON Home document, sticks new_prefix on each of the urls."""
+
+    for dummy_rel, resource in six.iteritems(json_home['resources']):
+        if 'href' in resource:
+            resource['href'] = new_prefix + resource['href']
+        elif 'href-template' in resource:
+            resource['href-template'] = new_prefix + resource['href-template']
diff --git a/keystone-moon/keystone/common/kvs/__init__.py b/keystone-moon/keystone/common/kvs/__init__.py
new file mode 100644 (file)
index 0000000..9a406a8
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dogpile.cache import region
+
+from keystone.common.kvs.core import *  # noqa
+from keystone.common.kvs.legacy import Base, DictKvs, INMEMDB  # noqa
+
+
+# NOTE(morganfainberg): Provided backends are registered here in the __init__
+# for the kvs system.  Any out-of-tree backends should be registered via the
+# ``backends`` option in the ``[kvs]`` section of the Keystone configuration
+# file.
+region.register_backend(
+    'openstack.kvs.Memory',
+    'keystone.common.kvs.backends.inmemdb',
+    'MemoryBackend')
+
+region.register_backend(
+    'openstack.kvs.Memcached',
+    'keystone.common.kvs.backends.memcached',
+    'MemcachedBackend')
diff --git a/keystone-moon/keystone/common/kvs/backends/__init__.py b/keystone-moon/keystone/common/kvs/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/common/kvs/backends/inmemdb.py b/keystone-moon/keystone/common/kvs/backends/inmemdb.py
new file mode 100644 (file)
index 0000000..68072ef
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Keystone In-Memory Dogpile.cache backend implementation.
+"""
+
+import copy
+
+from dogpile.cache import api
+
+
+NO_VALUE = api.NO_VALUE
+
+
+class MemoryBackend(api.CacheBackend):
+    """A backend that uses a plain dictionary.
+
+    There is no size management, and values which are placed into the
+    dictionary will remain until explicitly removed. Note that Dogpile's
+    expiration of items is based on timestamps and does not remove them from
+    the cache.
+
+    E.g.::
+
+        from dogpile.cache import make_region
+
+        region = make_region().configure(
+            'openstack.kvs.Memory'
+        )
+    """
+    def __init__(self, arguments):
+        self._db = {}
+
+    def _isolate_value(self, value):
+        if value is not NO_VALUE:
+            return copy.deepcopy(value)
+        return value
+
+    def get(self, key):
+        return self._isolate_value(self._db.get(key, NO_VALUE))
+
+    def get_multi(self, keys):
+        return [self.get(key) for key in keys]
+
+    def set(self, key, value):
+        self._db[key] = self._isolate_value(value)
+
+    def set_multi(self, mapping):
+        for key, value in mapping.items():
+            self.set(key, value)
+
+    def delete(self, key):
+        self._db.pop(key, None)
+
+    def delete_multi(self, keys):
+        for key in keys:
+            self.delete(key)
diff --git a/keystone-moon/keystone/common/kvs/backends/memcached.py b/keystone-moon/keystone/common/kvs/backends/memcached.py
new file mode 100644 (file)
index 0000000..db45314
--- /dev/null
@@ -0,0 +1,188 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Keystone Memcached dogpile.cache backend implementation.
+"""
+
+import random as _random
+import time
+
+from dogpile.cache import api
+from dogpile.cache.backends import memcached
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common.cache.backends import memcache_pool
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+NO_VALUE = api.NO_VALUE
+random = _random.SystemRandom()
+
+VALID_DOGPILE_BACKENDS = dict(
+    pylibmc=memcached.PylibmcBackend,
+    bmemcached=memcached.BMemcachedBackend,
+    memcached=memcached.MemcachedBackend,
+    pooled_memcached=memcache_pool.PooledMemcachedBackend)
+
+
+class MemcachedLock(object):
+    """Simple distributed lock using memcached.
+
+    This is an adaptation of the lock featured at
+    http://amix.dk/blog/post/19386
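+
+    The lock relies on memcached's atomic ``add`` operation: the first
+    client to add the lock key holds the lock until it is released or the
+    lock timeout expires.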
+
+    """
+    def __init__(self, client_fn, key, lock_timeout, max_lock_attempts):
+        self.client_fn = client_fn
+        self.key = "_lock" + key
+        self.lock_timeout = lock_timeout
+        self.max_lock_attempts = max_lock_attempts
+
+    def acquire(self, wait=True):
+        client = self.client_fn()
+        for i in range(self.max_lock_attempts):
+            if client.add(self.key, 1, self.lock_timeout):
+                return True
+            elif not wait:
+                return False
+            else:
+                sleep_time = random.random()
+                time.sleep(sleep_time)
+        raise exception.UnexpectedError(
+            _('Maximum lock attempts on %s occurred.') % self.key)
+
+    def release(self):
+        client = self.client_fn()
+        client.delete(self.key)
+
+
+class MemcachedBackend(manager.Manager):
+    """Pivot point to leverage the various dogpile.cache memcached backends.
+
+    To specify a particular dogpile.cache memcached driver, pass the argument
+    `memcached_backend` set to one of the provided drivers (at this time
+    `memcached`, `bmemcached`, `pylibmc` and `pooled_memcached` are valid).
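+
+    For example (argument values are illustrative)::
+
+        MemcachedBackend({'url': ['localhost:11211'],
+                          'memcached_backend': 'pooled_memcached'})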
+    """
+    def __init__(self, arguments):
+        self._key_mangler = None
+        self.raw_no_expiry_keys = set(arguments.pop('no_expiry_keys', set()))
+        self.no_expiry_hashed_keys = set()
+
+        self.lock_timeout = arguments.pop('lock_timeout', None)
+        self.max_lock_attempts = arguments.pop('max_lock_attempts', 15)
+        # NOTE(morganfainberg): Remove distributed locking from the arguments
+        # passed to the "real" backend if it exists.
+        arguments.pop('distributed_lock', None)
+        backend = arguments.pop('memcached_backend', None)
+        if 'url' not in arguments:
+            # FIXME(morganfainberg): Log deprecation warning for old-style
+            # configuration once full dict_config style configuration for
+            # KVS backends is supported.  For now use the current memcache
+            # section of the configuration.
+            arguments['url'] = CONF.memcache.servers
+
+        if backend is None:
+            # NOTE(morganfainberg): Use the basic memcached backend if nothing
+            # else is supplied.
+            self.driver = VALID_DOGPILE_BACKENDS['memcached'](arguments)
+        else:
+            if backend not in VALID_DOGPILE_BACKENDS:
+                raise ValueError(
+                    _('Backend `%(driver)s` is not a valid memcached '
+                      'backend. Valid drivers: %(driver_list)s') %
+                    {'driver': backend,
+                     'driver_list': ','.join(VALID_DOGPILE_BACKENDS.keys())})
+            else:
+                self.driver = VALID_DOGPILE_BACKENDS[backend](arguments)
+
+    def _get_set_arguments_driver_attr(self, exclude_expiry=False):
+
+        # NOTE(morganfainberg): Shallow copy the .set_arguments dict to
+        # ensure no changes cause the values to change in the instance
+        # variable.
+        set_arguments = getattr(self.driver, 'set_arguments', {}).copy()
+
+        if exclude_expiry:
+            # NOTE(morganfainberg): Explicitly strip out the 'time' key/value
+            # from the set_arguments in the case that this key isn't meant
+            # to expire
+            set_arguments.pop('time', None)
+        return set_arguments
+
+    def set(self, key, value):
+        mapping = {key: value}
+        self.set_multi(mapping)
+
+    def set_multi(self, mapping):
+        mapping_keys = set(mapping.keys())
+        no_expiry_keys = mapping_keys.intersection(self.no_expiry_hashed_keys)
+        has_expiry_keys = mapping_keys.difference(self.no_expiry_hashed_keys)
+
+        if no_expiry_keys:
+            # NOTE(morganfainberg): For keys that have expiry excluded,
+            # bypass the backend and call the client directly. This is
+            # required because the driver applies its 'set_arguments' to
+            # all ``set`` and ``set_multi`` calls; only by calling the
+            # client directly can the ``time`` argument to the memcached
+            # server be omitted.
+            new_mapping = {k: mapping[k] for k in no_expiry_keys}
+            set_arguments = self._get_set_arguments_driver_attr(
+                exclude_expiry=True)
+            self.driver.client.set_multi(new_mapping, **set_arguments)
+
+        if has_expiry_keys:
+            new_mapping = {k: mapping[k] for k in has_expiry_keys}
+            self.driver.set_multi(new_mapping)
+
+    @classmethod
+    def from_config_dict(cls, config_dict, prefix):
+        prefix_len = len(prefix)
+        return cls(
+            {key[prefix_len:]: config_dict[key] for key in config_dict
+             if key.startswith(prefix)})
+
+    @property
+    def key_mangler(self):
+        if self._key_mangler is None:
+            self._key_mangler = self.driver.key_mangler
+        return self._key_mangler
+
+    @key_mangler.setter
+    def key_mangler(self, key_mangler):
+        if callable(key_mangler):
+            self._key_mangler = key_mangler
+            self._rehash_keys()
+        elif key_mangler is None:
+            # NOTE(morganfainberg): Set the hashed key map to the unhashed
+            # list since we no longer have a key_mangler.
+            self._key_mangler = None
+            self.no_expiry_hashed_keys = self.raw_no_expiry_keys
+        else:
+            raise TypeError(_('`key_mangler` functions must be callable.'))
+
+    def _rehash_keys(self):
+        no_expire = set()
+        for key in self.raw_no_expiry_keys:
+            no_expire.add(self._key_mangler(key))
+        self.no_expiry_hashed_keys = no_expire
+
+    def get_mutex(self, key):
+        return MemcachedLock(lambda: self.driver.client, key,
+                             self.lock_timeout, self.max_lock_attempts)
diff --git a/keystone-moon/keystone/common/kvs/core.py b/keystone-moon/keystone/common/kvs/core.py
new file mode 100644 (file)
index 0000000..cbbb746
--- /dev/null
@@ -0,0 +1,423 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import threading
+import time
+import weakref
+
+from dogpile.cache import api
+from dogpile.cache import proxy
+from dogpile.cache import region
+from dogpile.cache import util as dogpile_util
+from dogpile.core import nameregistry
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import importutils
+import six
+
+from keystone import exception
+from keystone.i18n import _
+from keystone.i18n import _LI
+from keystone.i18n import _LW
+
+
+__all__ = ['KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
+           'get_key_value_store']
+
+
+BACKENDS_REGISTERED = False
+CONF = cfg.CONF
+KEY_VALUE_STORE_REGISTRY = weakref.WeakValueDictionary()
+LOCK_WINDOW = 1
+LOG = log.getLogger(__name__)
+NO_VALUE = api.NO_VALUE
+
+
+def _register_backends():
+    # NOTE(morganfainberg): This function exists to ensure we do not try and
+    # register the backends prior to the configuration object being fully
+    # available.  We also need to ensure we do not register a given backend
+    # more than one time.  All backends will be prefixed with openstack.kvs
+    # as the "short" name to reference them for configuration purposes.  This
+    # function is used in addition to the pre-registered backends in the
+    # __init__ file for the KVS system.
+    global BACKENDS_REGISTERED
+
+    if not BACKENDS_REGISTERED:
+        prefix = 'openstack.kvs.%s'
+        for backend in CONF.kvs.backends:
+            module, cls = backend.rsplit('.', 1)
+            backend_name = prefix % cls
+            LOG.debug(('Registering Dogpile Backend %(backend_path)s as '
+                       '%(backend_name)s'),
+                      {'backend_path': backend, 'backend_name': backend_name})
+            region.register_backend(backend_name, module, cls)
+        BACKENDS_REGISTERED = True
+
+
+class LockTimeout(exception.UnexpectedError):
+    debug_message_format = _('Lock Timeout occurred for key, %(target)s')
+
+
+class KeyValueStore(object):
+    """Basic KVS manager object to support Keystone Key-Value-Store systems.
+
+    This manager also supports the concept of locking a given key resource to
+    allow for a guaranteed atomic transaction to the backend.
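+
+    A minimal usage sketch (the store name is illustrative)::
+
+        kvs = get_key_value_store('example-store')
+        kvs.configure('openstack.kvs.Memory')
+        kvs.set('key', 'value')
+        value = kvs.get('key')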
+    """
+    def __init__(self, kvs_region):
+        self.locking = True
+        self._lock_timeout = 0
+        self._region = kvs_region
+        self._security_strategy = None
+        self._secret_key = None
+        self._lock_registry = nameregistry.NameRegistry(self._create_mutex)
+
+    def configure(self, backing_store, key_mangler=None, proxy_list=None,
+                  locking=True, **region_config_args):
+        """Configure the KeyValueStore instance.
+
+        :param backing_store: dogpile.cache short name of the region backend
+        :param key_mangler: key_mangler function
+        :param proxy_list: list of proxy classes to apply to the region
+        :param locking: boolean that allows disabling of locking mechanism for
+                        this instantiation
+        :param region_config_args: keyword args passed to the dogpile.cache
+                                   backend for configuration
+        """
+        if self.is_configured:
+            # NOTE(morganfainberg): It is a bad idea to reconfigure a backend;
+            # there are a lot of pitfalls and potential memory leaks that
+            # could occur.  By far the best approach is to re-create the KVS
+            # object with the new configuration.
+            raise RuntimeError(_('KVS region %s is already configured. '
+                                 'Cannot reconfigure.') % self._region.name)
+
+        self.locking = locking
+        self._lock_timeout = region_config_args.pop(
+            'lock_timeout', CONF.kvs.default_lock_timeout)
+        self._configure_region(backing_store, **region_config_args)
+        self._set_key_mangler(key_mangler)
+        self._apply_region_proxy(proxy_list)
+
+    @property
+    def is_configured(self):
+        return 'backend' in self._region.__dict__
+
+    def _apply_region_proxy(self, proxy_list):
+        if isinstance(proxy_list, list):
+            proxies = []
+
+            for item in proxy_list:
+                if isinstance(item, str):
+                    LOG.debug('Importing class %s as KVS proxy.', item)
+                    pxy = importutils.import_class(item)
+                else:
+                    pxy = item
+
+                if issubclass(pxy, proxy.ProxyBackend):
+                    proxies.append(pxy)
+                else:
+                    LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
+                                pxy.__name__)
+
+            for proxy_cls in reversed(proxies):
+                LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
+                         {'proxy': proxy_cls.__name__,
+                          'name': self._region.name})
+                self._region.wrap(proxy_cls)
+
+    def _assert_configured(self):
+        if 'backend' not in self._region.__dict__:
+            raise exception.UnexpectedError(_('Key Value Store not '
+                                              'configured: %s'),
+                                            self._region.name)
+
+    def _set_keymangler_on_backend(self, key_mangler):
+        try:
+            self._region.backend.key_mangler = key_mangler
+        except Exception as e:
+            # NOTE(morganfainberg): Setting the key_mangler on the backend
+            # is used to allow the backend to calculate a hashed key value
+            # as needed. Not all backends require the ability to calculate
+            # hashed keys. If the backend does not support/require this
+            # feature, log a debug line and move on; otherwise raise the
+            # proper exception.  Support of the feature is implied by the
+            # existence of the 'raw_no_expiry_keys' attribute.
+            if not hasattr(self._region.backend, 'raw_no_expiry_keys'):
+                LOG.debug(('Non-expiring keys not supported/required by '
+                           '%(region)s backend; unable to set '
+                           'key_mangler for backend: %(err)s'),
+                          {'region': self._region.name, 'err': e})
+            else:
+                raise
+
+    def _set_key_mangler(self, key_mangler):
+        # Set the key_mangler that is appropriate for the given region being
+        # configured here.  The key_mangler function is called prior to storing
+        # the value(s) in the backend.  This is to help prevent collisions and
+        # limit issues such as memcache's limited cache_key size.
+        use_backend_key_mangler = getattr(self._region.backend,
+                                          'use_backend_key_mangler', False)
+        if ((key_mangler is None or use_backend_key_mangler) and
+                (self._region.backend.key_mangler is not None)):
+            # NOTE(morganfainberg): Use the configured key_mangler as a first
+            # choice. Second choice would be the key_mangler defined by the
+            # backend itself.  Finally, fall back to the defaults.  The one
+            # exception is if the backend defines `use_backend_key_mangler`
+            # as True, which indicates the backend's key_mangler should be
+            # the first choice.
+            key_mangler = self._region.backend.key_mangler
+
+        if CONF.kvs.enable_key_mangler:
+            if key_mangler is not None:
+                msg = _LI('Using %(func)s as KVS region %(name)s key_mangler')
+                if callable(key_mangler):
+                    self._region.key_mangler = key_mangler
+                    LOG.info(msg, {'func': key_mangler.__name__,
+                                   'name': self._region.name})
+                else:
+                    # NOTE(morganfainberg): We failed to set the key_mangler,
+                    # we should error out here to ensure we aren't causing
+                    # key-length or collision issues.
+                    raise exception.ValidationError(
+                        _('`key_mangler` option must be a function reference'))
+            else:
+                LOG.info(_LI('Using default dogpile sha1_mangle_key as KVS '
+                             'region %s key_mangler'), self._region.name)
+                # NOTE(morganfainberg): Sane 'default' keymangler is the
+                # dogpile sha1_mangle_key function.  This ensures that unless
+                # explicitly changed, we mangle keys.  This helps to limit
+                # unintended cases of exceeding cache-key in backends such
+                # as memcache.
+                self._region.key_mangler = dogpile_util.sha1_mangle_key
+            self._set_keymangler_on_backend(self._region.key_mangler)
+        else:
+            LOG.info(_LI('KVS region %s key_mangler disabled.'),
+                     self._region.name)
+            self._set_keymangler_on_backend(None)
+
+    def _configure_region(self, backend, **config_args):
+        prefix = CONF.kvs.config_prefix
+        conf_dict = {}
+        conf_dict['%s.backend' % prefix] = backend
+
+        if 'distributed_lock' not in config_args:
+            config_args['distributed_lock'] = True
+
+        config_args['lock_timeout'] = self._lock_timeout
+
+        # NOTE(morganfainberg): To mitigate race conditions on comparing
+        # the timeout and current time on the lock mutex, we are building
+        # in a static 1 second overlap where the lock will still be valid
+        # in the backend but not from the perspective of the context
+        # manager.  Since we must develop to the lowest-common-denominator
+        # when it comes to the backends, memcache's cache store is not more
+        # refined than 1 second, therefore we must build in at least a 1
+        # second overlap.  `lock_timeout` of 0 means locks never expire.
+        if config_args['lock_timeout'] > 0:
+            config_args['lock_timeout'] += LOCK_WINDOW
+
+        for argument, value in six.iteritems(config_args):
+            arg_key = '.'.join([prefix, 'arguments', argument])
+            conf_dict[arg_key] = value
+
+        LOG.debug('KVS region configuration for %(name)s: %(config)r',
+                  {'name': self._region.name, 'config': conf_dict})
+        self._region.configure_from_config(conf_dict, '%s.' % prefix)
+
+    def _mutex(self, key):
+        return self._lock_registry.get(key)
+
+    def _create_mutex(self, key):
+        mutex = self._region.backend.get_mutex(key)
+        if mutex is not None:
+            return mutex
+        else:
+            return self._LockWrapper(lock_timeout=self._lock_timeout)
+
+    class _LockWrapper(object):
+        """weakref-capable threading.Lock wrapper."""
+        def __init__(self, lock_timeout):
+            self.lock = threading.Lock()
+            self.lock_timeout = lock_timeout
+
+        def acquire(self, wait=True):
+            return self.lock.acquire(wait)
+
+        def release(self):
+            self.lock.release()
+
+    def get(self, key):
+        """Get a single value from the KVS backend."""
+        self._assert_configured()
+        value = self._region.get(key)
+        if value is NO_VALUE:
+            raise exception.NotFound(target=key)
+        return value
+
+    def get_multi(self, keys):
+        """Get multiple values in a single call from the KVS backend."""
+        self._assert_configured()
+        values = self._region.get_multi(keys)
+        not_found = []
+        for index, key in enumerate(keys):
+            if values[index] is NO_VALUE:
+                not_found.append(key)
+        if not_found:
+            # NOTE(morganfainberg): If any of the multi-get values are non-
+            # existent, we should raise a NotFound error to mimic the .get()
+            # method's behavior.  In all cases the internal dogpile NO_VALUE
+            # should be masked from the consumer of the KeyValueStore.
+            raise exception.NotFound(target=not_found)
+        return values
+
+    def set(self, key, value, lock=None):
+        """Set a single value in the KVS backend."""
+        self._assert_configured()
+        with self._action_with_lock(key, lock):
+            self._region.set(key, value)
+
+    def set_multi(self, mapping):
+        """Set multiple key/value pairs in the KVS backend at once.
+
+        Like delete_multi, this call does not serialize through the
+        KeyValueStoreLock mechanism (locking cannot occur on more than one
+        key in a given context without significant deadlock potential).
+        """
+        self._assert_configured()
+        self._region.set_multi(mapping)
+
+    def delete(self, key, lock=None):
+        """Delete a single key from the KVS backend.
+
+        This method will raise NotFound if the key doesn't exist.  The get and
+        delete are done in a single transaction (via KeyValueStoreLock
+        mechanism).
+        """
+        self._assert_configured()
+
+        with self._action_with_lock(key, lock):
+            self.get(key)
+            self._region.delete(key)
+
+    def delete_multi(self, keys):
+        """Delete multiple keys from the KVS backend in a single call.
+
+        Like set_multi, this call does not serialize through the
+        KeyValueStoreLock mechanism (locking cannot occur on more than one
+        key in a given context without significant deadlock potential).
+        """
+        self._assert_configured()
+        self._region.delete_multi(keys)
+
+    def get_lock(self, key):
+        """Get a write lock on the KVS value referenced by `key`.
+
+        The ability to get a context manager to pass into the set/delete
+        methods allows a single transaction to occur while guaranteeing that
+        the backing store will not change between the start of the 'lock' and
+        the end.  The lock timeout is fixed to the KeyValueStore's configured
+        lock timeout.
+        """
+        self._assert_configured()
+        return KeyValueStoreLock(self._mutex(key), key, self.locking,
+                                 self._lock_timeout)
+
+    @contextlib.contextmanager
+    def _action_with_lock(self, key, lock=None):
+        """Wrapper context manager to validate and handle the lock and lock
+        timeout if passed in.
+        """
+        if not isinstance(lock, KeyValueStoreLock):
+            # NOTE(morganfainberg): Locking only matters if a lock is passed in
+            # to this method.  If lock isn't a KeyValueStoreLock, treat this as
+            # if no locking needs to occur.
+            yield
+        else:
+            if lock.key != key:
+                raise ValueError(_('Lock key must match target key: %(lock)s '
+                                   '!= %(target)s') %
+                                 {'lock': lock.key, 'target': key})
+            if not lock.active:
+                raise exception.ValidationError(_('Must be called within an '
+                                                  'active lock context.'))
+            if not lock.expired:
+                yield
+            else:
+                raise LockTimeout(target=key)
+
+
+class KeyValueStoreLock(object):
+    """Basic KeyValueStoreLock context manager that hooks into the
+    dogpile.cache backend mutex, allowing distributed locking on resources.
+
+    This is only a write lock, and will not prevent reads from occurring.
+    """
+    def __init__(self, mutex, key, locking_enabled=True, lock_timeout=0):
+        self.mutex = mutex
+        self.key = key
+        self.enabled = locking_enabled
+        self.lock_timeout = lock_timeout
+        self.active = False
+        self.acquire_time = 0
+
+    def acquire(self):
+        if self.enabled:
+            self.mutex.acquire()
+            LOG.debug('KVS lock acquired for: %s', self.key)
+        self.active = True
+        self.acquire_time = time.time()
+        return self
+
+    __enter__ = acquire
+
+    @property
+    def expired(self):
+        if self.lock_timeout:
+            calculated = time.time() - self.acquire_time + LOCK_WINDOW
+            return calculated > self.lock_timeout
+        else:
+            return False
+
+    def release(self):
+        if self.enabled:
+            self.mutex.release()
+            if not self.expired:
+                LOG.debug('KVS lock released for: %s', self.key)
+            else:
+                LOG.warning(_LW('KVS lock released (timeout reached) for: %s'),
+                            self.key)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.release()
+
+
+def get_key_value_store(name, kvs_region=None):
+    """Instantiate a new :class:`.KeyValueStore` or return a previous
+    instantiation that has the same name.
+    """
+    global KEY_VALUE_STORE_REGISTRY
+
+    _register_backends()
+    key_value_store = KEY_VALUE_STORE_REGISTRY.get(name)
+    if key_value_store is None:
+        if kvs_region is None:
+            kvs_region = region.make_region(name=name)
+        key_value_store = KeyValueStore(kvs_region)
+        KEY_VALUE_STORE_REGISTRY[name] = key_value_store
+    return key_value_store
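+
+
+# NOTE(editor): illustrative sketch, not part of the original module. It
+# shows how the public API above is typically combined; the store name and
+# key are hypothetical, and the store is assumed to have been configured
+# already.
+def _example_kvs_usage():
+    kvs = get_key_value_store('example-store')
+    with kvs.get_lock('some-key') as lock:
+        # The lock context guarantees the backing store cannot change
+        # between acquiring the lock and the set() below.
+        kvs.set('some-key', {'value': 42}, lock=lock)
+    return kvs.get('some-key')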
diff --git a/keystone-moon/keystone/common/kvs/legacy.py b/keystone-moon/keystone/common/kvs/legacy.py
new file mode 100644 (file)
index 0000000..ba03601
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone import exception
+from keystone.openstack.common import versionutils
+
+
+class DictKvs(dict):
+    def get(self, key, default=None):
+        try:
+            if isinstance(self[key], dict):
+                return self[key].copy()
+            else:
+                return self[key][:]
+        except KeyError:
+            if default is not None:
+                return default
+            raise exception.NotFound(target=key)
+
+    def set(self, key, value):
+        if isinstance(value, dict):
+            self[key] = value.copy()
+        else:
+            self[key] = value[:]
+
+    def delete(self, key):
+        """Deletes an item, returning True on success, False otherwise."""
+        try:
+            del self[key]
+        except KeyError:
+            raise exception.NotFound(target=key)
+
+
+INMEMDB = DictKvs()
+
+
+class Base(object):
+    @versionutils.deprecated(versionutils.deprecated.ICEHOUSE,
+                             in_favor_of='keystone.common.kvs.KeyValueStore',
+                             remove_in=+2,
+                             what='keystone.common.kvs.Base')
+    def __init__(self, db=None):
+        if db is None:
+            db = INMEMDB
+        elif isinstance(db, DictKvs):
+            # Already the right type; this branch must precede the plain dict
+            # check below because DictKvs subclasses dict.
+            pass
+        elif isinstance(db, dict):
+            db = DictKvs(db)
+        self.db = db
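+
+
+# NOTE(editor): illustrative sketch, not part of the original module. It
+# demonstrates the copy-on-read semantics of DictKvs above: mutating a
+# returned value does not alter the stored one. The values are
+# hypothetical.
+def _example_dict_kvs():
+    db = DictKvs()
+    db.set('user', {'name': 'alice'})
+    fetched = db.get('user')
+    fetched['name'] = 'bob'  # mutates only the returned copy
+    assert db.get('user')['name'] == 'alice'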
diff --git a/keystone-moon/keystone/common/ldap/__init__.py b/keystone-moon/keystone/common/ldap/__init__.py
new file mode 100644 (file)
index 0000000..ab5bf4d
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.ldap.core import *  # noqa
diff --git a/keystone-moon/keystone/common/ldap/core.py b/keystone-moon/keystone/common/ldap/core.py
new file mode 100644 (file)
index 0000000..144c0cf
--- /dev/null
@@ -0,0 +1,1910 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import codecs
+import functools
+import os.path
+import re
+import sys
+import weakref
+
+import ldap.filter
+import ldappool
+from oslo_log import log
+import six
+
+from keystone import exception
+from keystone.i18n import _
+from keystone.i18n import _LW
+
+LOG = log.getLogger(__name__)
+
+LDAP_VALUES = {'TRUE': True, 'FALSE': False}
+CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
+LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
+               'sub': ldap.SCOPE_SUBTREE}
+LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
+              'default': None,
+              'finding': ldap.DEREF_FINDING,
+              'never': ldap.DEREF_NEVER,
+              'searching': ldap.DEREF_SEARCHING}
+LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
+                  'demand': ldap.OPT_X_TLS_DEMAND,
+                  'allow': ldap.OPT_X_TLS_ALLOW}
+
+
+# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
+# indicate that no attributes should be returned besides the DN.
+DN_ONLY = ['1.1']
+
+_utf8_encoder = codecs.getencoder('utf-8')
+
+
+def utf8_encode(value):
+    """Encode a basestring to UTF-8.
+
+    If the string is unicode, encode it to UTF-8; if the string is
+    str, assume it's already encoded. Otherwise raise a TypeError.
+
+    :param value: A basestring
+    :returns: UTF-8 encoded version of value
+    :raises: TypeError if value is not basestring
+    """
+    if isinstance(value, six.text_type):
+        return _utf8_encoder(value)[0]
+    elif isinstance(value, six.binary_type):
+        return value
+    else:
+        raise TypeError("value must be basestring, "
+                        "not %s" % value.__class__.__name__)
+
+_utf8_decoder = codecs.getdecoder('utf-8')
+
+
+def utf8_decode(value):
+    """Decode a from UTF-8 into unicode.
+
+    If the value is a binary string assume it's UTF-8 encoded and decode
+    it into a unicode string. Otherwise convert the value from its
+    type into a unicode string.
+
+    :param value: value to be returned as unicode
+    :returns: value as unicode
+    :raises: UnicodeDecodeError for invalid UTF-8 encoding
+    """
+    if isinstance(value, six.binary_type):
+        return _utf8_decoder(value)[0]
+    return six.text_type(value)
+
+
+def py2ldap(val):
+    """Type convert a Python value to a type accepted by LDAP (unicode).
+
+    The LDAP API only accepts strings for values, therefore convert
+    the value's type to a unicode string. A subsequent type conversion
+    will encode the unicode as UTF-8 as required by the python-ldap API,
+    but for now we just want a string representation of the value.
+
+    :param val: The value to convert to a LDAP string representation
+    :returns: unicode string representation of value.
+    """
+    if isinstance(val, bool):
+        return u'TRUE' if val else u'FALSE'
+    else:
+        return six.text_type(val)
+
+
+def enabled2py(val):
+    """Similar to ldap2py, only useful for the enabled attribute."""
+
+    try:
+        return LDAP_VALUES[val]
+    except KeyError:
+        pass
+    try:
+        return int(val)
+    except ValueError:
+        pass
+    return utf8_decode(val)
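+
+
+# NOTE(editor): illustrative sketch, not part of the original module. It
+# exercises the conversion helpers above; the sample values are
+# hypothetical.
+def _example_type_conversion():
+    assert py2ldap(True) == u'TRUE'
+    assert enabled2py('TRUE') is True
+    assert enabled2py('42') == 42
+    encoded = utf8_encode(u'caf\xe9')
+    assert utf8_decode(encoded) == u'caf\xe9'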
+
+
+def ldap2py(val):
+    """Convert an LDAP formatted value to Python type used by OpenStack.
+
+    Virtually all LDAP values are stored as UTF-8 encoded strings.
+    OpenStack prefers values which are unicode friendly.
+
+    :param val: LDAP formatted value
+    :returns: val converted to preferred Python type
+    """
+    return utf8_decode(val)
+
+
+def convert_ldap_result(ldap_result):
+    """Convert LDAP search result to Python types used by OpenStack.
+
+    Each result tuple is of the form (dn, attrs), where dn is a string
+    containing the DN (distinguished name) of the entry, and attrs is
+    a dictionary containing the attributes associated with the
+    entry. The keys of attrs are strings, and the associated values
+    are lists of strings.
+
+    OpenStack wants to use Python types of its choosing. Strings will
+    be unicode, truth values boolean, whole numbers ints, etc. DNs will
+    also be decoded from UTF-8 to unicode.
+
+    :param ldap_result: LDAP search result
+    :returns: list of 2-tuples containing (dn, attrs) where dn is unicode
+              and attrs is a dict whose values are type converted to
+              OpenStack preferred types.
+    """
+    py_result = []
+    at_least_one_referral = False
+    for dn, attrs in ldap_result:
+        ldap_attrs = {}
+        if dn is None:
+            # this is a Referral object, rather than an Entry object
+            at_least_one_referral = True
+            continue
+
+        for kind, values in six.iteritems(attrs):
+            try:
+                val2py = enabled2py if kind == 'enabled' else ldap2py
+                ldap_attrs[kind] = [val2py(x) for x in values]
+            except UnicodeDecodeError:
+                LOG.debug('Unable to decode value for attribute %s', kind)
+
+        py_result.append((utf8_decode(dn), ldap_attrs))
+    if at_least_one_referral:
+        LOG.debug(('Referrals were returned and ignored. Enable referral '
+                   'chasing in keystone.conf via [ldap] chase_referrals'))
+
+    return py_result
+
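+
+# NOTE(editor): illustrative sketch, not part of the original module. A
+# typical python-ldap search result and its converted form; the entry is
+# hypothetical.
+def _example_convert_ldap_result():
+    raw = [('cn=alice,dc=example,dc=com',
+            {'enabled': ['TRUE'], 'cn': ['alice']})]
+    # Returns [(u'cn=alice,dc=example,dc=com',
+    #           {'enabled': [True], 'cn': [u'alice']})]
+    return convert_ldap_result(raw)
+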
+
+def safe_iter(attrs):
+    if attrs is None:
+        return
+    elif isinstance(attrs, list):
+        for e in attrs:
+            yield e
+    else:
+        yield attrs
+
+
+def parse_deref(opt):
+    try:
+        return LDAP_DEREF[opt]
+    except KeyError:
+        raise ValueError(_('Invalid LDAP deref option: %(option)s. '
+                           'Choose one of: %(options)s') %
+                         {'option': opt,
+                          'options': ', '.join(LDAP_DEREF.keys()), })
+
+
+def parse_tls_cert(opt):
+    try:
+        return LDAP_TLS_CERTS[opt]
+    except KeyError:
+        raise ValueError(_(
+            'Invalid LDAP TLS certs option: %(option)s. '
+            'Choose one of: %(options)s') % {
+                'option': opt,
+                'options': ', '.join(LDAP_TLS_CERTS.keys())})
+
+
+def ldap_scope(scope):
+    try:
+        return LDAP_SCOPES[scope]
+    except KeyError:
+        raise ValueError(
+            _('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
+                'scope': scope,
+                'options': ', '.join(LDAP_SCOPES.keys())})
+
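+
+# NOTE(editor): illustrative sketch, not part of the original module. It
+# shows the option parsers above mapping configuration strings to
+# python-ldap constants.
+def _example_parse_options():
+    assert ldap_scope('sub') == ldap.SCOPE_SUBTREE
+    assert parse_deref('never') == ldap.DEREF_NEVER
+    assert parse_tls_cert('demand') == ldap.OPT_X_TLS_DEMAND
+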
+
+def prep_case_insensitive(value):
+    """Prepare a string for case-insensitive comparison.
+
+    This is defined in RFC 4518. For simplicity, all this function does is
+    lowercase all the characters, strip leading and trailing whitespace,
+    and compress internal runs of whitespace to a single space.
+    """
+    value = re.sub(r'\s+', ' ', value.strip().lower())
+    return value
+
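+
+# NOTE(editor): illustrative sketch, not part of the original module; the
+# input string is hypothetical.
+def _example_prep_case_insensitive():
+    assert prep_case_insensitive('  Alice   SMITH ') == 'alice smith'
+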
+
+def is_ava_value_equal(attribute_type, val1, val2):
+    """Returns True if and only if the AVAs are equal.
+
+    When comparing AVAs, the equality matching rule for the attribute type
+    should be taken into consideration. For simplicity, this implementation
+    does a case-insensitive comparison.
+
+    Note that this function uses prep_case_insensitive, so the limitations
+    of that function apply here.
+
+    """
+
+    return prep_case_insensitive(val1) == prep_case_insensitive(val2)
+
+
+def is_rdn_equal(rdn1, rdn2):
+    """Returns True if and only if the RDNs are equal.
+
+    * RDNs must have the same number of AVAs.
+    * Each AVA of the RDNs must be the equal for the same attribute type. The
+      order isn't significant. Note that an attribute type will only be in one
+      AVA in an RDN, otherwise the DN wouldn't be valid.
+    * Attribute types aren't case sensitive. Note that attribute type
+      comparison is more complicated than what is implemented here: this
+      function only compares case-insensitively, whereas it should also
+      handle multiple names for the same attribute type (e.g., cn,
+      commonName, and 2.5.4.3 are equivalent).
+
+    Note that this function uses is_ava_value_equal to compare AVAs so the
+    limitations of that function apply here.
+
+    """
+
+    if len(rdn1) != len(rdn2):
+        return False
+
+    for attr_type_1, val1, dummy in rdn1:
+        found = False
+        for attr_type_2, val2, dummy in rdn2:
+            if attr_type_1.lower() != attr_type_2.lower():
+                continue
+
+            found = True
+            if not is_ava_value_equal(attr_type_1, val1, val2):
+                return False
+            break
+        if not found:
+            return False
+
+    return True
+
+
+def is_dn_equal(dn1, dn2):
+    """Returns True if and only if the DNs are equal.
+
+    Two DNs are equal if they've got the same number of RDNs and if the RDNs
+    are the same at each position. See RFC4517.
+
+    Note that this function uses is_rdn_equal to compare RDNs so the
+    limitations of that function apply here.
+
+    :param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
+    :param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
+
+    """
+
+    if not isinstance(dn1, list):
+        dn1 = ldap.dn.str2dn(utf8_encode(dn1))
+    if not isinstance(dn2, list):
+        dn2 = ldap.dn.str2dn(utf8_encode(dn2))
+
+    if len(dn1) != len(dn2):
+        return False
+
+    for rdn1, rdn2 in zip(dn1, dn2):
+        if not is_rdn_equal(rdn1, rdn2):
+            return False
+    return True
+
+
+def dn_startswith(descendant_dn, dn):
+    """Returns True if and only if the descendant_dn is under the dn.
+
+    :param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
+    :param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
+
+    """
+
+    if not isinstance(descendant_dn, list):
+        descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
+    if not isinstance(dn, list):
+        dn = ldap.dn.str2dn(utf8_encode(dn))
+
+    if len(descendant_dn) <= len(dn):
+        return False
+
+    # Use the last len(dn) RDNs.
+    return is_dn_equal(descendant_dn[-len(dn):], dn)
+
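+
+# NOTE(editor): illustrative sketch, not part of the original module. It
+# exercises the DN comparison helpers above; the DNs are hypothetical.
+def _example_dn_helpers():
+    assert is_dn_equal(u'cn=Alice,dc=example,dc=com',
+                       u'CN=alice,dc=Example,dc=com')
+    assert dn_startswith(u'cn=alice,ou=users,dc=example,dc=com',
+                         u'dc=example,dc=com')
+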
+
+@six.add_metaclass(abc.ABCMeta)
+class LDAPHandler(object):
+    '''Abstract class which defines methods for an LDAP API provider.
+
+    Native Keystone values cannot be passed directly into and from the
+    python-ldap API. Type conversion must occur at the LDAP API
+    boundary; examples of type conversions are:
+
+        * booleans map to the strings 'TRUE' and 'FALSE'
+
+        * integer values map to their string representation.
+
+        * unicode strings are encoded in UTF-8
+
+    In addition to handling type conversions at the API boundary we
+    have the requirement to support more than one LDAP API
+    provider. Currently we have:
+
+        * python-ldap, this is the standard LDAP API for Python, it
+          requires access to a live LDAP server.
+
+        * Fake LDAP which emulates python-ldap. This is used for
+          testing without requiring a live LDAP server.
+
+    To support these requirements we need a layer that performs type
+    conversions and then calls another LDAP API which is configurable
+    (e.g. either python-ldap or the fake emulation).
+
+    We have an additional constraint at the time of this writing due to
+    limitations in the logging module. The logging module is not
+    capable of accepting UTF-8 encoded strings; it will throw an
+    encoding exception. Therefore all logging MUST be performed prior
+    to UTF-8 conversion. This means no logging can be performed in the
+    ldap APIs that implement the python-ldap API because those APIs
+    are defined to accept only UTF-8 strings. Thus the layer which
+    performs type conversions must also do the logging. We do the type
+    conversions in two steps, once to convert all Python types to
+    unicode strings, then log, then convert the unicode strings to
+    UTF-8.
+
+    There are a variety of ways one could accomplish this, we elect to
+    use a chaining technique whereby instances of this class simply
+    call the next member in the chain via the "conn" attribute. The
+    chain is constructed by passing in an existing instance of this
+    class as the conn attribute when the class is instantiated.
+
+    Here is a brief explanation of why other possible approaches were
+    not used:
+
+        subclassing
+
+            To perform the wrapping operations in the correct order
+            the type conversion class would have to subclass each of
+            the API providers. This is awkward, doubles the number of
+            classes, and does not scale well. It requires the type
+            conversion class to be aware of all possible API
+            providers.
+
+        decorators
+
+            Decorators provide an elegant solution to wrap methods and
+            would be an ideal way to perform type conversions before
+            calling the wrapped function and then converting the
+            values returned from the wrapped function. However, a
+            decorator needs to be aware of the method signature: it
+            has to know which input parameters need conversion and how
+            to convert the result. For an API like python-ldap, which
+            has a large number of different method signatures, this
+            would require a large number of specialized decorators.
+            Experience has shown it's very easy to apply the wrong
+            decorator due to the inherent complexity and the tendency
+            to cut-and-paste code. Another option is to parameterize
+            the decorator to make it "smart". Experience has shown
+            such decorators become insanely complicated and difficult
+            to understand and debug. Decorators also tend to hide
+            what's really going on when a method is called; the
+            operations being performed are not visible when looking at
+            the implementation of a decorated method, which experience
+            has also shown leads to mistakes.
+
+    Chaining simplifies both the wrapping needed to perform type conversion
+    and the substitution of alternative API providers. One simply
+    creates a new instance of the API interface and inserts it at the
+    front of the chain. Type conversions are explicit and obvious.
+
+    If a new method needs to be added to the API interface, one adds it
+    to the abstract class definition. Should one miss adding the new
+    method to any derivation of the abstract class, the code will fail
+    to load and run, making it impossible to forget to update all the
+    derived classes.
+    '''
+    @abc.abstractmethod
+    def __init__(self, conn=None):
+        self.conn = conn
+
+    @abc.abstractmethod
+    def connect(self, url, page_size=0, alias_dereferencing=None,
+                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+                tls_req_cert='demand', chase_referrals=None, debug_level=None,
+                use_pool=None, pool_size=None, pool_retry_max=None,
+                pool_retry_delay=None, pool_conn_timeout=None,
+                pool_conn_lifetime=None):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def set_option(self, option, invalue):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_option(self, option):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def simple_bind_s(self, who='', cred='',
+                      serverctrls=None, clientctrls=None):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def unbind_s(self):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def add_s(self, dn, modlist):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def search_s(self, base, scope,
+                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def search_ext(self, base, scope,
+                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+                   serverctrls=None, clientctrls=None,
+                   timeout=-1, sizelimit=0):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
+                resp_ctrl_classes=None):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def modify_s(self, dn, modlist):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_s(self, dn):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
+        raise exception.NotImplemented()  # pragma: no cover
+
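+
+# NOTE(editor): illustrative sketch, not part of the original module. It
+# shows the chaining technique described in the LDAPHandler docstring:
+# KeystoneLDAPHandler (defined later in this module) converts types and
+# logs, then delegates to the wrapped provider through its `conn`
+# attribute. The URL is hypothetical.
+def _example_handler_chain():
+    handler = KeystoneLDAPHandler(conn=PythonLDAPHandler())
+    handler.connect('ldap://ldap.example.com')
+    return handler
+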
+
+class PythonLDAPHandler(LDAPHandler):
+    '''Implementation of the LDAPHandler interface which calls the
+    python-ldap API.
+
+    Note, the python-ldap API requires all string values to be UTF-8
+    encoded. The KeystoneLDAPHandler enforces this prior to invoking
+    the methods in this class.
+    '''
+
+    def __init__(self, conn=None):
+        super(PythonLDAPHandler, self).__init__(conn=conn)
+
+    def connect(self, url, page_size=0, alias_dereferencing=None,
+                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+                tls_req_cert='demand', chase_referrals=None, debug_level=None,
+                use_pool=None, pool_size=None, pool_retry_max=None,
+                pool_retry_delay=None, pool_conn_timeout=None,
+                pool_conn_lifetime=None):
+
+        _common_ldap_initialization(url=url,
+                                    use_tls=use_tls,
+                                    tls_cacertfile=tls_cacertfile,
+                                    tls_cacertdir=tls_cacertdir,
+                                    tls_req_cert=tls_req_cert,
+                                    debug_level=debug_level)
+
+        self.conn = ldap.initialize(url)
+        self.conn.protocol_version = ldap.VERSION3
+
+        if alias_dereferencing is not None:
+            self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
+        self.page_size = page_size
+
+        if use_tls:
+            self.conn.start_tls_s()
+
+        if chase_referrals is not None:
+            self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
+
+    def set_option(self, option, invalue):
+        return self.conn.set_option(option, invalue)
+
+    def get_option(self, option):
+        return self.conn.get_option(option)
+
+    def simple_bind_s(self, who='', cred='',
+                      serverctrls=None, clientctrls=None):
+        return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
+
+    def unbind_s(self):
+        return self.conn.unbind_s()
+
+    def add_s(self, dn, modlist):
+        return self.conn.add_s(dn, modlist)
+
+    def search_s(self, base, scope,
+                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+        return self.conn.search_s(base, scope, filterstr,
+                                  attrlist, attrsonly)
+
+    def search_ext(self, base, scope,
+                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+                   serverctrls=None, clientctrls=None,
+                   timeout=-1, sizelimit=0):
+        return self.conn.search_ext(base, scope,
+                                    filterstr, attrlist, attrsonly,
+                                    serverctrls, clientctrls,
+                                    timeout, sizelimit)
+
+    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
+                resp_ctrl_classes=None):
+        # The resp_ctrl_classes parameter is a recent addition to the
+        # API. It defaults to None. We do not anticipate using it.
+        # To run with older versions of python-ldap we do not pass it.
+        return self.conn.result3(msgid, all, timeout)
+
+    def modify_s(self, dn, modlist):
+        return self.conn.modify_s(dn, modlist)
+
+    def delete_s(self, dn):
+        return self.conn.delete_s(dn)
+
+    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
+        return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
+
+
+def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
+                                tls_cacertdir=None, tls_req_cert=None,
+                                debug_level=None):
+    '''Common LDAP initialization shared by PythonLDAPHandler and
+    PooledLDAPHandler.
+    '''
+
+    LOG.debug("LDAP init: url=%s", url)
+    LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
+              'tls_req_cert=%s tls_avail=%s',
+              use_tls, tls_cacertfile, tls_cacertdir,
+              tls_req_cert, ldap.TLS_AVAIL)
+
+    if debug_level is not None:
+        ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
+
+    using_ldaps = url.lower().startswith("ldaps")
+
+    if use_tls and using_ldaps:
+        raise AssertionError(_('Invalid TLS / LDAPS combination'))
+
+    # The certificate trust options apply for both LDAPS and TLS.
+    if use_tls or using_ldaps:
+        if not ldap.TLS_AVAIL:
+            raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
+                               'not available') % ldap.TLS_AVAIL)
+        if tls_cacertfile:
+            # NOTE(topol)
+            # python ldap TLS does not verify CACERTFILE or CACERTDIR
+            # so we add some extra simple sanity check verification
+            # Also, setting these values globally (i.e. on the ldap object)
+            # works but these values are ignored when setting them on the
+            # connection
+            if not os.path.isfile(tls_cacertfile):
+                raise IOError(_("tls_cacertfile %s not found "
+                                "or is not a file") %
+                              tls_cacertfile)
+            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
+        elif tls_cacertdir:
+            # NOTE(topol): same caveats as for tls_cacertfile above.
+            if not os.path.isdir(tls_cacertdir):
+                raise IOError(_("tls_cacertdir %s not found "
+                                "or is not a directory") %
+                              tls_cacertdir)
+            ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
+        if tls_req_cert in LDAP_TLS_CERTS.values():
+            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
+        else:
+            LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
+                      tls_req_cert)
+
+
+class MsgId(list):
+    '''Wrapper class to hold connection and msgid.'''
+    pass
+
+
+def use_conn_pool(func):
+    '''Use this only for connection-pool-specific LDAP APIs.
+
+    This passes a pooled connection object to the decorated API as the
+    argument following self.
+    '''
+    @functools.wraps(func)
+    def wrapper(self, *args, **kwargs):
+        # assert isinstance(self, PooledLDAPHandler)
+        with self._get_pool_connection() as conn:
+            self._apply_options(conn)
+            return func(self, conn, *args, **kwargs)
+    return wrapper
+
+
+class PooledLDAPHandler(LDAPHandler):
+    '''Implementation of the LDAPHandler interface which uses a pooled
+    connection manager.
+
+    Pool-specific configuration is defined in the [ldap] section, where
+    all other LDAP configuration also lives.
+
+    Keystone's LDAP authentication logic authenticates an end user via an
+    LDAP bind using the user's DN and password to establish that the
+    supplied password is correct. Because the pool re-uses existing
+    connections based on their bind data, such binds can fill the pool
+    quickly and leave no room for connection re-use by other LDAP
+    operations. A separate pool can therefore be established for these
+    requests by enabling the 'use_auth_pool' flag. That pool can have its
+    own size and connection lifetime; all other pool attributes are shared
+    between the two pools. If 'use_pool' is disabled, 'use_auth_pool' has
+    no effect; if 'use_auth_pool' is not enabled, connection pooling is
+    simply not used for end-user authentication.
+
+    Note, the python-ldap API requires all string values to be UTF-8
+    encoded. The KeystoneLDAPHandler enforces this prior to invoking
+    the methods in this class.
+    '''
+
+    # Added here to allow override for testing
+    Connector = ldappool.StateConnector
+    auth_pool_prefix = 'auth_pool_'
+
+    connection_pools = {}  # static connector pool dict
+
+    def __init__(self, conn=None, use_auth_pool=False):
+        super(PooledLDAPHandler, self).__init__(conn=conn)
+        self.who = ''
+        self.cred = ''
+        self.conn_options = {}  # connection specific options
+        self.page_size = None
+        self.use_auth_pool = use_auth_pool
+        self.conn_pool = None
+
+    def connect(self, url, page_size=0, alias_dereferencing=None,
+                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+                tls_req_cert='demand', chase_referrals=None, debug_level=None,
+                use_pool=None, pool_size=None, pool_retry_max=None,
+                pool_retry_delay=None, pool_conn_timeout=None,
+                pool_conn_lifetime=None):
+
+        _common_ldap_initialization(url=url,
+                                    use_tls=use_tls,
+                                    tls_cacertfile=tls_cacertfile,
+                                    tls_cacertdir=tls_cacertdir,
+                                    tls_req_cert=tls_req_cert,
+                                    debug_level=debug_level)
+
+        self.page_size = page_size
+
+        # The following two options are not set in the common initialization
+        # because they need to follow the same sequence as in the
+        # PythonLDAPHandler code.
+        if alias_dereferencing is not None:
+            self.set_option(ldap.OPT_DEREF, alias_dereferencing)
+        if chase_referrals is not None:
+            self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
+
+        if self.use_auth_pool:  # separate pool when use_auth_pool enabled
+            pool_url = self.auth_pool_prefix + url
+        else:
+            pool_url = url
+        try:
+            self.conn_pool = self.connection_pools[pool_url]
+        except KeyError:
+            self.conn_pool = ldappool.ConnectionManager(
+                url,
+                size=pool_size,
+                retry_max=pool_retry_max,
+                retry_delay=pool_retry_delay,
+                timeout=pool_conn_timeout,
+                connector_cls=self.Connector,
+                use_tls=use_tls,
+                max_lifetime=pool_conn_lifetime)
+            self.connection_pools[pool_url] = self.conn_pool
+
+    def set_option(self, option, invalue):
+        self.conn_options[option] = invalue
+
+    def get_option(self, option):
+        value = self.conn_options.get(option)
+        # If the option was not specified explicitly, fall back to the
+        # connection's default value for that option, if present.
+        if value is None:
+            with self._get_pool_connection() as conn:
+                value = conn.get_option(option)
+        return value
+
+    def _apply_options(self, conn):
+        # If the connection has already been alive for a while (beyond the
+        # 30 second threshold), its options have already been applied.
+        if conn.get_lifetime() > 30:
+            return
+        for option, invalue in six.iteritems(self.conn_options):
+            conn.set_option(option, invalue)
+
+    def _get_pool_connection(self):
+        return self.conn_pool.connection(self.who, self.cred)
+
+    def simple_bind_s(self, who='', cred='',
+                      serverctrls=None, clientctrls=None):
+        '''The use_conn_pool decorator is not used here because this API
+        takes the credentials as input.
+        '''
+        self.who = who
+        self.cred = cred
+        with self._get_pool_connection() as conn:
+            self._apply_options(conn)
+
+    def unbind_s(self):
+        # ldappool always releases a connection in a finally block once the
+        # `with` statement that obtained it exits, so this unbind is a no-op.
+        pass
+
+    @use_conn_pool
+    def add_s(self, conn, dn, modlist):
+        return conn.add_s(dn, modlist)
+
+    @use_conn_pool
+    def search_s(self, conn, base, scope,
+                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+        return conn.search_s(base, scope, filterstr, attrlist,
+                             attrsonly)
+
+    def search_ext(self, base, scope,
+                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+                   serverctrls=None, clientctrls=None,
+                   timeout=-1, sizelimit=0):
+        '''Asynchronous API which returns an MsgId instance to be used in a
+        subsequent result3 call.
+
+        To work with the result3 API in a predictable manner, the same LDAP
+        connection that provided the msgid is needed, so the connection and
+        msgid are wrapped together in an MsgId instance. The connection
+        associated with search_ext is released once the last hard reference
+        to the MsgId object is freed, which happens when the caller is done
+        using the returned MsgId.
+        '''
+
+        conn_ctxt = self._get_pool_connection()
+        conn = conn_ctxt.__enter__()
+        try:
+            msgid = conn.search_ext(base, scope,
+                                    filterstr, attrlist, attrsonly,
+                                    serverctrls, clientctrls,
+                                    timeout, sizelimit)
+        except Exception:
+            conn_ctxt.__exit__(*sys.exc_info())
+            raise
+        res = MsgId((conn, msgid))
+        weakref.ref(res, functools.partial(conn_ctxt.__exit__,
+                                           None, None, None))
+        return res
+
+    def result3(self, msgid, all=1, timeout=None,
+                resp_ctrl_classes=None):
+        '''Wait for and return the result of an operation previously
+        initiated by one of the LDAP asynchronous operation routines
+        (e.g. search_ext()), which returned an invocation identifier
+        (a message id) upon successful initiation of the operation.
+
+        The input msgid is expected to be an instance of class MsgId,
+        which holds the LDAP session/connection used to execute
+        search_ext together with the message identifier.
+
+        The connection associated with search_ext is released once the
+        last hard reference to the MsgId object is freed. This happens
+        when the function that requested the msgid and used it in
+        result3 exits.
+        '''
+
+        conn, msg_id = msgid
+        return conn.result3(msg_id, all, timeout)
+
+    @use_conn_pool
+    def modify_s(self, conn, dn, modlist):
+        return conn.modify_s(dn, modlist)
+
+    @use_conn_pool
+    def delete_s(self, conn, dn):
+        return conn.delete_s(dn)
+
+    @use_conn_pool
+    def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
+        return conn.delete_ext_s(dn, serverctrls, clientctrls)
+
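+
+# NOTE(editor): illustrative sketch, not part of the original module. It
+# shows the asynchronous search_ext/result3 pairing documented above: the
+# MsgId wrapper keeps the pooled connection alive until the result is
+# collected. The handler argument, base DN, and filter are hypothetical.
+def _example_pooled_search(handler):
+    msgid = handler.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+                               filterstr='(objectClass=person)')
+    rtype, rdata, rmsgid, serverctrls = handler.result3(msgid)
+    return rdata
+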
+
+class KeystoneLDAPHandler(LDAPHandler):
+    '''Convert data types and perform logging.
+
+    This LDAP interface wraps the python-ldap based interfaces. The
+    python-ldap interfaces require string values encoded in UTF-8. The
+    OpenStack logging framework, at the time of this writing, is not
+    capable of accepting strings encoded in UTF-8; the log functions
+    will throw decoding errors if a non-ascii character appears in a
+    string.
+
+    Prior to the call, Python data types are converted to a string
+    representation as required by the LDAP APIs.
+
+    Then logging is performed so we can track what is being
+    sent/received from LDAP. Also the logging filters security
+    sensitive items (i.e. passwords).
+
+    Then the string values are encoded into UTF-8.
+
+    Then the LDAP API entry point is invoked.
+
+    Data returned from the LDAP call is converted back from UTF-8
+    encoded strings into the Python data type used internally in
+    OpenStack.
+    '''
+
+    def __init__(self, conn=None):
+        super(KeystoneLDAPHandler, self).__init__(conn=conn)
+        self.page_size = 0
+
+    def __enter__(self):
+        return self
+
+    def _disable_paging(self):
+        # Disable the pagination from now on
+        self.page_size = 0
+
+    def connect(self, url, page_size=0, alias_dereferencing=None,
+                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+                tls_req_cert='demand', chase_referrals=None, debug_level=None,
+                use_pool=None, pool_size=None,
+                pool_retry_max=None, pool_retry_delay=None,
+                pool_conn_timeout=None, pool_conn_lifetime=None):
+        self.page_size = page_size
+        return self.conn.connect(url, page_size, alias_dereferencing,
+                                 use_tls, tls_cacertfile, tls_cacertdir,
+                                 tls_req_cert, chase_referrals,
+                                 debug_level=debug_level,
+                                 use_pool=use_pool,
+                                 pool_size=pool_size,
+                                 pool_retry_max=pool_retry_max,
+                                 pool_retry_delay=pool_retry_delay,
+                                 pool_conn_timeout=pool_conn_timeout,
+                                 pool_conn_lifetime=pool_conn_lifetime)
+
+    def set_option(self, option, invalue):
+        return self.conn.set_option(option, invalue)
+
+    def get_option(self, option):
+        return self.conn.get_option(option)
+
+    def simple_bind_s(self, who='', cred='',
+                      serverctrls=None, clientctrls=None):
+        LOG.debug("LDAP bind: who=%s", who)
+        who_utf8 = utf8_encode(who)
+        cred_utf8 = utf8_encode(cred)
+        return self.conn.simple_bind_s(who_utf8, cred_utf8,
+                                       serverctrls=serverctrls,
+                                       clientctrls=clientctrls)
+
+    def unbind_s(self):
+        LOG.debug("LDAP unbind")
+        return self.conn.unbind_s()
+
+    def add_s(self, dn, modlist):
+        ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
+                      for kind, values in modlist]
+        logging_attrs = [(kind, values
+                         if kind != 'userPassword'
+                         else ['****'])
+                         for kind, values in ldap_attrs]
+        LOG.debug('LDAP add: dn=%s attrs=%s',
+                  dn, logging_attrs)
+        dn_utf8 = utf8_encode(dn)
+        ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
+                           for kind, values in ldap_attrs]
+        return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
+
+    def search_s(self, base, scope,
+                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+        # NOTE(morganfainberg): Remove "None" singletons from this list, which
+        # allows us to set mapped attributes to "None" as defaults in config.
+        # Without this filtering, the ldap query would raise a TypeError since
+        # attrlist is expected to be an iterable of strings.
+        if attrlist is not None:
+            attrlist = [attr for attr in attrlist if attr is not None]
+        LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
+                  'attrs=%s attrsonly=%s',
+                  base, scope, filterstr, attrlist, attrsonly)
+        if self.page_size:
+            ldap_result = self._paged_search_s(base, scope,
+                                               filterstr, attrlist)
+        else:
+            base_utf8 = utf8_encode(base)
+            filterstr_utf8 = utf8_encode(filterstr)
+            if attrlist is None:
+                attrlist_utf8 = None
+            else:
+                attrlist_utf8 = map(utf8_encode, attrlist)
+            ldap_result = self.conn.search_s(base_utf8, scope,
+                                             filterstr_utf8,
+                                             attrlist_utf8, attrsonly)
+
+        py_result = convert_ldap_result(ldap_result)
+
+        return py_result
+
+    def search_ext(self, base, scope,
+                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+                   serverctrls=None, clientctrls=None,
+                   timeout=-1, sizelimit=0):
+        if attrlist is not None:
+            attrlist = [attr for attr in attrlist if attr is not None]
+        LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
+                  'attrs=%s attrsonly=%s '
+                  'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
+                  base, scope, filterstr, attrlist, attrsonly,
+                  serverctrls, clientctrls, timeout, sizelimit)
+        return self.conn.search_ext(base, scope,
+                                    filterstr, attrlist, attrsonly,
+                                    serverctrls, clientctrls,
+                                    timeout, sizelimit)
+
+    def _paged_search_s(self, base, scope, filterstr, attrlist=None):
+        res = []
+        use_old_paging_api = False
+        # The API for the simple paged results control changed between
+        # python-ldap 2.3 and 2.4.  We need to detect the capabilities
+        # of the python-ldap version we are using.
+        if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
+            use_old_paging_api = True
+            lc = ldap.controls.SimplePagedResultsControl(
+                controlType=ldap.LDAP_CONTROL_PAGE_OID,
+                criticality=True,
+                controlValue=(self.page_size, ''))
+            page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
+        else:
+            lc = ldap.controls.libldap.SimplePagedResultsControl(
+                criticality=True,
+                size=self.page_size,
+                cookie='')
+            page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
+
+        base_utf8 = utf8_encode(base)
+        filterstr_utf8 = utf8_encode(filterstr)
+        if attrlist is None:
+            attrlist_utf8 = None
+        else:
+            attrlist = [attr for attr in attrlist if attr is not None]
+            attrlist_utf8 = map(utf8_encode, attrlist)
+        msgid = self.conn.search_ext(base_utf8,
+                                     scope,
+                                     filterstr_utf8,
+                                     attrlist_utf8,
+                                     serverctrls=[lc])
+        # Loop, requesting pages from the LDAP server until no data remains
+        while True:
+            # Request a page of 'page_size' entries from the LDAP server
+            rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
+            # Receive the data
+            res.extend(rdata)
+            pctrls = [c for c in serverctrls
+                      if c.controlType == page_ctrl_oid]
+            if pctrls:
+                # LDAP server supports pagination
+                if use_old_paging_api:
+                    est, cookie = pctrls[0].controlValue
+                    lc.controlValue = (self.page_size, cookie)
+                else:
+                    cookie = lc.cookie = pctrls[0].cookie
+
+                if cookie:
+                    # There is more data still on the server
+                    # so we request another page
+                    msgid = self.conn.search_ext(base_utf8,
+                                                 scope,
+                                                 filterstr_utf8,
+                                                 attrlist_utf8,
+                                                 serverctrls=[lc])
+                else:
+                    # Exit condition: no more data on the server
+                    break
+            else:
+                LOG.warning(_LW('LDAP Server does not support paging. '
+                                'Disable paging in keystone.conf to '
+                                'avoid this message.'))
+                self._disable_paging()
+                break
+        return res
+
+    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
+                resp_ctrl_classes=None):
+        ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
+
+        LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
+                  'resp_ctrl_classes=%s ldap_result=%s',
+                  msgid, all, timeout, resp_ctrl_classes, ldap_result)
+
+        py_result = convert_ldap_result(ldap_result)
+        return py_result
+
+    def modify_s(self, dn, modlist):
+        ldap_modlist = [
+            (op, kind, (None if values is None
+                        else [py2ldap(x) for x in safe_iter(values)]))
+            for op, kind, values in modlist]
+
+        logging_modlist = [(op, kind, (values if kind != 'userPassword'
+                           else ['****']))
+                           for op, kind, values in ldap_modlist]
+        LOG.debug('LDAP modify: dn=%s modlist=%s',
+                  dn, logging_modlist)
+
+        dn_utf8 = utf8_encode(dn)
+        ldap_modlist_utf8 = [
+            (op, kind, (None if values is None
+                        else [utf8_encode(x) for x in safe_iter(values)]))
+            for op, kind, values in ldap_modlist]
+        return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
+
+    def delete_s(self, dn):
+        LOG.debug("LDAP delete: dn=%s", dn)
+        dn_utf8 = utf8_encode(dn)
+        return self.conn.delete_s(dn_utf8)
+
+    def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
+        LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
+                  dn, serverctrls, clientctrls)
+        dn_utf8 = utf8_encode(dn)
+        return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.unbind_s()
+
+
+_HANDLERS = {}
+
+
+def register_handler(prefix, handler):
+    _HANDLERS[prefix] = handler
+
+
+def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
+    for prefix, handler in six.iteritems(_HANDLERS):
+        if conn_url.startswith(prefix):
+            return handler()
+
+    if use_pool:
+        return PooledLDAPHandler(use_auth_pool=use_auth_pool)
+    else:
+        return PythonLDAPHandler()
+
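+
+# NOTE(editor): illustrative sketch, not part of the original module. It
+# shows the handler registry above: a class registered for a URL prefix is
+# returned for matching URLs. The 'fake://' prefix is hypothetical.
+def _example_register_handler():
+    class EchoHandler(PythonLDAPHandler):
+        pass
+
+    register_handler('fake://', EchoHandler)
+    conn = _get_connection('fake://memory')
+    assert isinstance(conn, EchoHandler)
+    return conn
+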
+
+def filter_entity(entity_ref):
+    """Filter out private items in an entity dict.
+
+    :param entity_ref: the entity dictionary. The 'dn' field will be
+        removed; 'dn' is used in LDAP but should not be returned to the
+        user. The dictionary is modified in place.
+
+    :returns: entity_ref
+
+    """
+    if entity_ref:
+        entity_ref.pop('dn', None)
+    return entity_ref
+
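+
+# NOTE(editor): illustrative sketch, not part of the original module; the
+# entity dict is hypothetical.
+def _example_filter_entity():
+    entity = {'id': '123', 'name': 'alice',
+              'dn': 'cn=alice,dc=example,dc=com'}
+    assert 'dn' not in filter_entity(entity)
+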
+
+class BaseLdap(object):
+    DEFAULT_SUFFIX = "dc=example,dc=com"
+    DEFAULT_OU = None
+    DEFAULT_STRUCTURAL_CLASSES = None
+    DEFAULT_ID_ATTR = 'cn'
+    DEFAULT_OBJECTCLASS = None
+    DEFAULT_FILTER = None
+    DEFAULT_EXTRA_ATTR_MAPPING = []
+    DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
+    NotFound = None
+    notfound_arg = None
+    options_name = None
+    model = None
+    attribute_options_names = {}
+    immutable_attrs = []
+    attribute_ignore = []
+    tree_dn = None
+
+    def __init__(self, conf):
+        self.LDAP_URL = conf.ldap.url
+        self.LDAP_USER = conf.ldap.user
+        self.LDAP_PASSWORD = conf.ldap.password
+        self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
+        self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
+        self.page_size = conf.ldap.page_size
+        self.use_tls = conf.ldap.use_tls
+        self.tls_cacertfile = conf.ldap.tls_cacertfile
+        self.tls_cacertdir = conf.ldap.tls_cacertdir
+        self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
+        self.attribute_mapping = {}
+        self.chase_referrals = conf.ldap.chase_referrals
+        self.debug_level = conf.ldap.debug_level
+
+        # LDAP Pool specific attribute
+        self.use_pool = conf.ldap.use_pool
+        self.pool_size = conf.ldap.pool_size
+        self.pool_retry_max = conf.ldap.pool_retry_max
+        self.pool_retry_delay = conf.ldap.pool_retry_delay
+        self.pool_conn_timeout = conf.ldap.pool_connection_timeout
+        self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
+
+        # End user authentication pool specific config attributes
+        self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
+        self.auth_pool_size = conf.ldap.auth_pool_size
+        self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
+
+        if self.options_name is not None:
+            self.suffix = conf.ldap.suffix
+            if self.suffix is None:
+                self.suffix = self.DEFAULT_SUFFIX
+            dn = '%s_tree_dn' % self.options_name
+            self.tree_dn = (getattr(conf.ldap, dn)
+                            or '%s,%s' % (self.DEFAULT_OU, self.suffix))
+
+            idatt = '%s_id_attribute' % self.options_name
+            self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
+
+            objclass = '%s_objectclass' % self.options_name
+            self.object_class = (getattr(conf.ldap, objclass)
+                                 or self.DEFAULT_OBJECTCLASS)
+
+            for k, v in six.iteritems(self.attribute_options_names):
+                v = '%s_%s_attribute' % (self.options_name, v)
+                self.attribute_mapping[k] = getattr(conf.ldap, v)
+
+            attr_mapping_opt = ('%s_additional_attribute_mapping' %
+                                self.options_name)
+            attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
+                            or self.DEFAULT_EXTRA_ATTR_MAPPING)
+            self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
+
+            ldap_filter = '%s_filter' % self.options_name
+            self.ldap_filter = getattr(conf.ldap,
+                                       ldap_filter) or self.DEFAULT_FILTER
+
+            allow_create = '%s_allow_create' % self.options_name
+            self.allow_create = getattr(conf.ldap, allow_create)
+
+            allow_update = '%s_allow_update' % self.options_name
+            self.allow_update = getattr(conf.ldap, allow_update)
+
+            allow_delete = '%s_allow_delete' % self.options_name
+            self.allow_delete = getattr(conf.ldap, allow_delete)
+
+            member_attribute = '%s_member_attribute' % self.options_name
+            self.member_attribute = getattr(conf.ldap, member_attribute, None)
+
+            self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
+
+            if self.notfound_arg is None:
+                self.notfound_arg = self.options_name + '_id'
+
+            attribute_ignore = '%s_attribute_ignore' % self.options_name
+            self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
+
+        self.use_dumb_member = conf.ldap.use_dumb_member
+        self.dumb_member = (conf.ldap.dumb_member or
+                            self.DUMB_MEMBER_DN)
+
+        self.subtree_delete_enabled = conf.ldap.allow_subtree_delete
+
+    def _not_found(self, object_id):
+        if self.NotFound is None:
+            return exception.NotFound(target=object_id)
+        else:
+            return self.NotFound(**{self.notfound_arg: object_id})
+
+    def _parse_extra_attrs(self, option_list):
+        mapping = {}
+        for item in option_list:
+            try:
+                ldap_attr, attr_map = item.split(':')
+            except Exception:
+                LOG.warning(_LW(
+                    'Invalid additional attribute mapping: "%s". '
+                    'Format must be <ldap_attribute>:<keystone_attribute>'),
+                    item)
+                continue
+            mapping[ldap_attr] = attr_map
+        return mapping
+
+    def _is_dumb_member(self, member_dn):
+        """Checks that member is a dumb member.
+
+        :param member_dn: DN of member to be checked.
+        """
+        return (self.use_dumb_member
+                and is_dn_equal(member_dn, self.dumb_member))
+
+    def get_connection(self, user=None, password=None, end_user_auth=False):
+        use_pool = self.use_pool
+        pool_size = self.pool_size
+        pool_conn_lifetime = self.pool_conn_lifetime
+
+        if end_user_auth:
+            if not self.use_auth_pool:
+                use_pool = False
+            else:
+                pool_size = self.auth_pool_size
+                pool_conn_lifetime = self.auth_pool_conn_lifetime
+
+        conn = _get_connection(self.LDAP_URL, use_pool,
+                               use_auth_pool=end_user_auth)
+
+        conn = KeystoneLDAPHandler(conn=conn)
+
+        conn.connect(self.LDAP_URL,
+                     page_size=self.page_size,
+                     alias_dereferencing=self.alias_dereferencing,
+                     use_tls=self.use_tls,
+                     tls_cacertfile=self.tls_cacertfile,
+                     tls_cacertdir=self.tls_cacertdir,
+                     tls_req_cert=self.tls_req_cert,
+                     chase_referrals=self.chase_referrals,
+                     debug_level=self.debug_level,
+                     use_pool=use_pool,
+                     pool_size=pool_size,
+                     pool_retry_max=self.pool_retry_max,
+                     pool_retry_delay=self.pool_retry_delay,
+                     pool_conn_timeout=self.pool_conn_timeout,
+                     pool_conn_lifetime=pool_conn_lifetime
+                     )
+
+        if user is None:
+            user = self.LDAP_USER
+
+        if password is None:
+            password = self.LDAP_PASSWORD
+
+        # not all LDAP servers require authentication, so we don't bind
+        # if we don't have any user/pass
+        if user and password:
+            conn.simple_bind_s(user, password)
+
+        return conn
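
For orientation, a minimal usage sketch of the connection handling above, assuming an already-configured `BaseLdap` subclass instance (the `user_api` name and the search parameters are illustrative). The handler is used as a context manager, so the connection is unbound or returned to the pool when the block exits:

```python
import ldap

def find_people(user_api):
    # get_connection() binds with the service credentials from the
    # config unless an explicit user/password pair is supplied.
    with user_api.get_connection() as conn:
        return conn.search_s(user_api.tree_dn,
                             ldap.SCOPE_ONELEVEL,
                             '(objectClass=inetOrgPerson)',
                             ['cn', 'mail'])
```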
+
+    def _id_to_dn_string(self, object_id):
+        return u'%s=%s,%s' % (self.id_attr,
+                              ldap.dn.escape_dn_chars(
+                                  six.text_type(object_id)),
+                              self.tree_dn)
+
+    def _id_to_dn(self, object_id):
+        if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
+            return self._id_to_dn_string(object_id)
+        with self.get_connection() as conn:
+            search_result = conn.search_s(
+                self.tree_dn, self.LDAP_SCOPE,
+                u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
+                {'id_attr': self.id_attr,
+                 'id': ldap.filter.escape_filter_chars(
+                     six.text_type(object_id)),
+                 'objclass': self.object_class},
+                attrlist=DN_ONLY)
+        if search_result:
+            dn, attrs = search_result[0]
+            return dn
+        else:
+            return self._id_to_dn_string(object_id)
+
+    @staticmethod
+    def _dn_to_id(dn):
+        return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
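
A small worked illustration of the DN/ID round trip implemented by `_id_to_dn_string` and `_dn_to_id` (all values are made up):

```python
import ldap.dn

tree_dn = 'ou=Users,dc=example,dc=com'

# _id_to_dn_string: the object ID becomes the value of the first RDN.
dn = u'%s=%s,%s' % ('cn', ldap.dn.escape_dn_chars(u'alice'), tree_dn)
# dn == u'cn=alice,ou=Users,dc=example,dc=com'

# _dn_to_id: take the value of the first RDN back out.
object_id = ldap.dn.str2dn(dn.encode('utf-8'))[0][0][1]
# object_id == 'alice'
```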
+
+    def _ldap_res_to_model(self, res):
+        # LDAP attribute names may be returned in a different case than
+        # they are defined in the mapping, so we need to check for keys
+        # in a case-insensitive way.  We use the case specified in the
+        # mapping for the model to ensure we have a predictable way of
+        # retrieving values later.
+        lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
+
+        id_attrs = lower_res.get(self.id_attr.lower())
+        if not id_attrs:
+            message = _('ID attribute %(id_attr)s not found in LDAP '
+                        'object %(dn)s') % ({'id_attr': self.id_attr,
+                                             'dn': res[0]})
+            raise exception.NotFound(message=message)
+        if len(id_attrs) > 1:
+            # FIXME(gyee): if this is a multi-value attribute and it has
+            # multiple values, we can't use it as ID. Retain the dn_to_id
+            # logic here so it does not potentially break existing
+            # deployments. We need to fix our read-write LDAP logic so
+            # it does not get the ID from DN.
+            message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
+                          'has multiple values and therefore cannot be used '
+                          'as an ID. Will get the ID from DN instead') % (
+                              {'id_attr': self.id_attr,
+                               'dn': res[0]})
+            LOG.warn(message)
+            id_val = self._dn_to_id(res[0])
+        else:
+            id_val = id_attrs[0]
+        obj = self.model(id=id_val)
+
+        for k in obj.known_keys:
+            if k in self.attribute_ignore:
+                continue
+
+            try:
+                map_attr = self.attribute_mapping.get(k, k)
+                if map_attr is None:
+                    # Ignore attributes that are mapped to None.
+                    continue
+
+                v = lower_res[map_attr.lower()]
+            except KeyError:
+                pass
+            else:
+                try:
+                    obj[k] = v[0]
+                except IndexError:
+                    obj[k] = None
+
+        return obj
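
The case-insensitive lookup above matters because directory servers may return attribute names in a different case than the mapping declares; a standalone illustration (values are made up):

```python
# A search result as python-ldap returns it: a (dn, attrs) tuple.
# Note the server returned 'CN' although the mapping says 'cn'.
res = ('cn=alice,ou=Users,dc=example,dc=com',
       {'CN': ['alice'], 'Mail': ['alice@example.com']})

lower_res = {k.lower(): v for k, v in res[1].items()}
assert lower_res['cn'] == ['alice']  # found despite the case change
```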
+
+    def check_allow_create(self):
+        if not self.allow_create:
+            action = _('LDAP %s create') % self.options_name
+            raise exception.ForbiddenAction(action=action)
+
+    def check_allow_update(self):
+        if not self.allow_update:
+            action = _('LDAP %s update') % self.options_name
+            raise exception.ForbiddenAction(action=action)
+
+    def check_allow_delete(self):
+        if not self.allow_delete:
+            action = _('LDAP %s delete') % self.options_name
+            raise exception.ForbiddenAction(action=action)
+
+    def affirm_unique(self, values):
+        if values.get('name') is not None:
+            try:
+                self.get_by_name(values['name'])
+            except exception.NotFound:
+                pass
+            else:
+                raise exception.Conflict(type=self.options_name,
+                                         details=_('Duplicate name, %s.') %
+                                         values['name'])
+
+        if values.get('id') is not None:
+            try:
+                self.get(values['id'])
+            except exception.NotFound:
+                pass
+            else:
+                raise exception.Conflict(type=self.options_name,
+                                         details=_('Duplicate ID, %s.') %
+                                         values['id'])
+
+    def create(self, values):
+        self.affirm_unique(values)
+        object_classes = self.structural_classes + [self.object_class]
+        attrs = [('objectClass', object_classes)]
+        for k, v in six.iteritems(values):
+            if k in self.attribute_ignore:
+                continue
+            if k == 'id':
+                # no need to check if v is None as 'id' will always have
+                # a value
+                attrs.append((self.id_attr, [v]))
+            elif v is not None:
+                attr_type = self.attribute_mapping.get(k, k)
+                if attr_type is not None:
+                    attrs.append((attr_type, [v]))
+                extra_attrs = [attr for attr, name
+                               in six.iteritems(self.extra_attr_mapping)
+                               if name == k]
+                for attr in extra_attrs:
+                    attrs.append((attr, [v]))
+
+        if 'groupOfNames' in object_classes and self.use_dumb_member:
+            attrs.append(('member', [self.dumb_member]))
+        with self.get_connection() as conn:
+            conn.add_s(self._id_to_dn(values['id']), attrs)
+        return values
+
+    def _ldap_get(self, object_id, ldap_filter=None):
+        query = (u'(&(%(id_attr)s=%(id)s)'
+                 u'%(filter)s'
+                 u'(objectClass=%(object_class)s))'
+                 % {'id_attr': self.id_attr,
+                    'id': ldap.filter.escape_filter_chars(
+                        six.text_type(object_id)),
+                    'filter': (ldap_filter or self.ldap_filter or ''),
+                    'object_class': self.object_class})
+        with self.get_connection() as conn:
+            try:
+                attrs = list(set(([self.id_attr] +
+                                  self.attribute_mapping.values() +
+                                  self.extra_attr_mapping.keys())))
+                res = conn.search_s(self.tree_dn,
+                                    self.LDAP_SCOPE,
+                                    query,
+                                    attrs)
+            except ldap.NO_SUCH_OBJECT:
+                return None
+        try:
+            return res[0]
+        except IndexError:
+            return None
+
+    def _ldap_get_all(self, ldap_filter=None):
+        query = u'(&%s(objectClass=%s))' % (ldap_filter or
+                                            self.ldap_filter or
+                                            '', self.object_class)
+        with self.get_connection() as conn:
+            try:
+                attrs = list(set(([self.id_attr] +
+                                  self.attribute_mapping.values() +
+                                  self.extra_attr_mapping.keys())))
+                return conn.search_s(self.tree_dn,
+                                     self.LDAP_SCOPE,
+                                     query,
+                                     attrs)
+            except ldap.NO_SUCH_OBJECT:
+                return []
+
+    def _ldap_get_list(self, search_base, scope, query_params=None,
+                       attrlist=None):
+        query = u'(objectClass=%s)' % self.object_class
+        if query_params:
+
+            def calc_filter(attrname, value):
+                val_esc = ldap.filter.escape_filter_chars(value)
+                return '(%s=%s)' % (attrname, val_esc)
+
+            query = (u'(&%s%s)' %
+                     (query, ''.join([calc_filter(k, v) for k, v in
+                                      six.iteritems(query_params)])))
+        with self.get_connection() as conn:
+            return conn.search_s(search_base, scope, query, attrlist)
+
+    def get(self, object_id, ldap_filter=None):
+        res = self._ldap_get(object_id, ldap_filter)
+        if res is None:
+            raise self._not_found(object_id)
+        else:
+            return self._ldap_res_to_model(res)
+
+    def get_by_name(self, name, ldap_filter=None):
+        query = (u'(%s=%s)' % (self.attribute_mapping['name'],
+                               ldap.filter.escape_filter_chars(
+                                   six.text_type(name))))
+        res = self.get_all(query)
+        try:
+            return res[0]
+        except IndexError:
+            raise self._not_found(name)
+
+    def get_all(self, ldap_filter=None):
+        return [self._ldap_res_to_model(x)
+                for x in self._ldap_get_all(ldap_filter)]
+
+    def update(self, object_id, values, old_obj=None):
+        if old_obj is None:
+            old_obj = self.get(object_id)
+
+        modlist = []
+        for k, v in six.iteritems(values):
+            if k == 'id':
+                # id can't be modified.
+                continue
+
+            if k in self.attribute_ignore:
+
+                # Handle 'enabled' specially since we can't disable it if it is ignored.
+                if k == 'enabled' and (not v):
+                    action = _("Disabling an entity where the 'enable' "
+                               "attribute is ignored by configuration.")
+                    raise exception.ForbiddenAction(action=action)
+
+                continue
+
+            # attribute value has not changed
+            if k in old_obj and old_obj[k] == v:
+                continue
+
+            if k in self.immutable_attrs:
+                msg = (_("Cannot change %(option_name)s %(attr)s") %
+                       {'option_name': self.options_name, 'attr': k})
+                raise exception.ValidationError(msg)
+
+            if v is None:
+                if old_obj.get(k) is not None:
+                    modlist.append((ldap.MOD_DELETE,
+                                    self.attribute_mapping.get(k, k),
+                                    None))
+                continue
+
+            current_value = old_obj.get(k)
+            if current_value is None:
+                op = ldap.MOD_ADD
+                modlist.append((op, self.attribute_mapping.get(k, k), [v]))
+            elif current_value != v:
+                op = ldap.MOD_REPLACE
+                modlist.append((op, self.attribute_mapping.get(k, k), [v]))
+
+        if modlist:
+            with self.get_connection() as conn:
+                try:
+                    conn.modify_s(self._id_to_dn(object_id), modlist)
+                except ldap.NO_SUCH_OBJECT:
+                    raise self._not_found(object_id)
+
+        return self.get(object_id)
+
+    def delete(self, object_id):
+        with self.get_connection() as conn:
+            try:
+                conn.delete_s(self._id_to_dn(object_id))
+            except ldap.NO_SUCH_OBJECT:
+                raise self._not_found(object_id)
+
+    def deleteTree(self, object_id):
+        tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
+                                                        0,
+                                                        None)
+        with self.get_connection() as conn:
+            try:
+                conn.delete_ext_s(self._id_to_dn(object_id),
+                                  serverctrls=[tree_delete_control])
+            except ldap.NO_SUCH_OBJECT:
+                raise self._not_found(object_id)
+            except ldap.NOT_ALLOWED_ON_NONLEAF:
+                # Most LDAP servers do not support the tree_delete_control.
+                # In these servers, the usual idiom is to first perform a
+                # search to get the entries to delete, then delete them
+                # in order of child to parent, since LDAP forbids the
+                # deletion of a parent entry before deleting the children
+                # of that parent.  The simplest way to do that is to delete
+                # the entries in order of the length of the DN, from longest
+                # to shortest DN.
+                dn = self._id_to_dn(object_id)
+                scope = ldap.SCOPE_SUBTREE
+                # With some directory servers, an entry with objectclass
+                # ldapsubentry will not be returned unless it is explicitly
+                # requested, by specifying the objectclass in the search
+                # filter.  We must specify this, with objectclass=*, in an
+                # LDAP filter OR clause, in order to return all entries
+                filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
+                # We only need the DNs of the entries.  Since no attributes
+                # will be returned, we do not have to specify attrsonly=1.
+                entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
+                if entries:
+                    for dn in sorted((e[0] for e in entries),
+                                     key=len, reverse=True):
+                        conn.delete_s(dn)
+                else:
+                    LOG.debug('No entries in LDAP subtree %s', dn)
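
The fallback's child-before-parent ordering can be seen in isolation (the DNs are illustrative): sorting by DN length, longest first, guarantees that leaf entries are deleted before their parents:

```python
entries = ['ou=Groups,dc=example,dc=com',
           'cn=admins,ou=Groups,dc=example,dc=com',
           'cn=devs,ou=Groups,dc=example,dc=com']

for dn in sorted(entries, key=len, reverse=True):
    print(dn)  # leaf entries come out (and would be deleted) first
```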
+
+    def add_member(self, member_dn, member_list_dn):
+        """Add member to the member list.
+
+        :param member_dn: DN of member to be added.
+        :param member_list_dn: DN of group to which the
+                               member will be added.
+
+        :raises: exception.Conflict: If the user was already a member.
+                 self.NotFound: If the group entry didn't exist.
+        """
+        with self.get_connection() as conn:
+            try:
+                mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
+                conn.modify_s(member_list_dn, [mod])
+            except ldap.TYPE_OR_VALUE_EXISTS:
+                raise exception.Conflict(_('Member %(member)s '
+                                           'is already a member'
+                                           ' of group %(group)s') % {
+                                         'member': member_dn,
+                                         'group': member_list_dn})
+            except ldap.NO_SUCH_OBJECT:
+                raise self._not_found(member_list_dn)
+
+    def remove_member(self, member_dn, member_list_dn):
+        """Remove member from the member list.
+
+        :param member_dn: DN of member to be removed.
+        :param member_list_dn: DN of group from which the
+                               member will be removed.
+
+        :raises: self.NotFound: If the group entry didn't exist.
+                 ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
+        """
+        with self.get_connection() as conn:
+            try:
+                mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
+                conn.modify_s(member_list_dn, [mod])
+            except ldap.NO_SUCH_OBJECT:
+                raise self._not_found(member_list_dn)
+
+    def _delete_tree_nodes(self, search_base, scope, query_params=None):
+        query = u'(objectClass=%s)' % self.object_class
+        if query_params:
+            query = (u'(&%s%s)' %
+                     (query, ''.join(['(%s=%s)'
+                                      % (k, ldap.filter.escape_filter_chars(v))
+                                      for k, v in
+                                      six.iteritems(query_params)])))
+        not_deleted_nodes = []
+        with self.get_connection() as conn:
+            try:
+                nodes = conn.search_s(search_base, scope, query,
+                                      attrlist=DN_ONLY)
+            except ldap.NO_SUCH_OBJECT:
+                LOG.debug('Could not find entry with dn=%s', search_base)
+                raise self._not_found(self._dn_to_id(search_base))
+            else:
+                for node_dn, _t in nodes:
+                    try:
+                        conn.delete_s(node_dn)
+                    except ldap.NO_SUCH_OBJECT:
+                        not_deleted_nodes.append(node_dn)
+
+        if not_deleted_nodes:
+            LOG.warn(_LW("When deleting entries for %(search_base)s, could not"
+                         " delete nonexistent entries %(entries)s%(dots)s"),
+                     {'search_base': search_base,
+                      'entries': not_deleted_nodes[:3],
+                      'dots': '...' if len(not_deleted_nodes) > 3 else ''})
+
+    def filter_query(self, hints, query=None):
+        """Applies filtering to a query.
+
+        :param hints: contains the list of filters, which may be None,
+                      indicating that there are no filters to be applied.
+                      If it's not None, then any filters satisfied here will be
+                      removed so that the caller will know if any filters
+                      remain to be applied.
+        :param query: LDAP query into which to include filters
+
+        :returns query: LDAP query, updated with any filters satisfied
+
+        """
+        def build_filter(filter_, hints):
+            """Build a filter for the query.
+
+            :param filter_: the dict that describes this filter
+            :param hints: contains the list of filters yet to be satisfied.
+
+            :returns query: LDAP query term to be added
+
+            """
+            ldap_attr = self.attribute_mapping[filter_['name']]
+            val_esc = ldap.filter.escape_filter_chars(filter_['value'])
+
+            if filter_['case_sensitive']:
+                # NOTE(henry-nash): Although dependent on the schema being
+                # used, most LDAP attributes are configured with case
+                # insensitive matching rules, so we'll leave this to the
+                # controller to filter.
+                return
+
+            if filter_['name'] == 'enabled':
+                # NOTE(henry-nash): Due to the different options for storing
+                # the enabled attribute (e.g. emulated or not), for now we
+                # don't try to filter this at the driver level - we simply
+                # leave the filter to be handled by the controller. It seems
+                # unlikely that this will cause a significant performance
+                # issue.
+                return
+
+            # TODO(henry-nash): Currently there are no booleans (other than
+            # 'enabled' that is handled above) on which you can filter. If
+            # there were, we would need to add special handling here to
+            # convert the booleans values to 'TRUE' and 'FALSE'. To do that
+            # we would also need to know which filter keys were actually
+            # booleans (this is related to bug #1411478).
+
+            if filter_['comparator'] == 'equals':
+                query_term = (u'(%(attr)s=%(val)s)'
+                              % {'attr': ldap_attr, 'val': val_esc})
+            elif filter_['comparator'] == 'contains':
+                query_term = (u'(%(attr)s=*%(val)s*)'
+                              % {'attr': ldap_attr, 'val': val_esc})
+            elif filter_['comparator'] == 'startswith':
+                query_term = (u'(%(attr)s=%(val)s*)'
+                              % {'attr': ldap_attr, 'val': val_esc})
+            elif filter_['comparator'] == 'endswith':
+                query_term = (u'(%(attr)s=*%(val)s)'
+                              % {'attr': ldap_attr, 'val': val_esc})
+            else:
+                # It's a filter we don't understand, so let the caller
+                # work out if they need to do something with it.
+                return
+
+            return query_term
+
+        if hints is None:
+            return query
+
+        filter_list = []
+        satisfied_filters = []
+
+        for filter_ in hints.filters:
+            if filter_['name'] not in self.attribute_mapping:
+                continue
+            new_filter = build_filter(filter_, hints)
+            if new_filter is not None:
+                filter_list.append(new_filter)
+                satisfied_filters.append(filter_)
+
+        if filter_list:
+            query = u'(&%s%s)' % (query, ''.join(filter_list))
+
+        # Remove satisfied filters so the caller knows which filters remain
+        for filter_ in satisfied_filters:
+            hints.filters.remove(filter_)
+
+        return query
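
A standalone sketch of the comparator-to-filter mapping that `build_filter` applies above (the attribute name and values are illustrative):

```python
import ldap.filter

def term(attr, comparator, value):
    val = ldap.filter.escape_filter_chars(value)
    patterns = {'equals': u'(%s=%s)',
                'contains': u'(%s=*%s*)',
                'startswith': u'(%s=%s*)',
                'endswith': u'(%s=*%s)'}
    return patterns[comparator] % (attr, val)

query = u'(&%s%s)' % (u'(objectClass=person)',
                      term('mail', 'endswith', '@example.com'))
# query == u'(&(objectClass=person)(mail=*@example.com))'
```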
+
+
+class EnabledEmuMixIn(BaseLdap):
+    """Emulates boolean 'enabled' attribute if turned on.
+
+    Creates a groupOfNames entry holding all enabled objects of this class;
+    all missing objects are considered disabled.
+
+    Options:
+
+    * $name_enabled_emulation - boolean, on/off
+    * $name_enabled_emulation_dn - DN of that groupOfNames, default is
+      cn=enabled_${name}s,${tree_dn}
+
+    Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
+    ${tree_dn} is self.tree_dn.
+    """
+
+    def __init__(self, conf):
+        super(EnabledEmuMixIn, self).__init__(conf)
+        enabled_emulation = '%s_enabled_emulation' % self.options_name
+        self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
+
+        enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
+        self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
+        if not self.enabled_emulation_dn:
+            naming_attr_name = 'cn'
+            naming_attr_value = 'enabled_%ss' % self.options_name
+            sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
+            self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
+            naming_attr = (naming_attr_name, [naming_attr_value])
+        else:
+            # Extract the attribute name and value from the configured DN.
+            naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
+            naming_rdn = naming_dn[0][0]
+            naming_attr = (utf8_decode(naming_rdn[0]),
+                           utf8_decode(naming_rdn[1]))
+        self.enabled_emulation_naming_attr = naming_attr
+
+    def _get_enabled(self, object_id):
+        dn = self._id_to_dn(object_id)
+        query = '(member=%s)' % dn
+        with self.get_connection() as conn:
+            try:
+                enabled_value = conn.search_s(self.enabled_emulation_dn,
+                                              ldap.SCOPE_BASE,
+                                              query, ['cn'])
+            except ldap.NO_SUCH_OBJECT:
+                return False
+            else:
+                return bool(enabled_value)
+
+    def _add_enabled(self, object_id):
+        if not self._get_enabled(object_id):
+            modlist = [(ldap.MOD_ADD,
+                        'member',
+                        [self._id_to_dn(object_id)])]
+            with self.get_connection() as conn:
+                try:
+                    conn.modify_s(self.enabled_emulation_dn, modlist)
+                except ldap.NO_SUCH_OBJECT:
+                    attr_list = [('objectClass', ['groupOfNames']),
+                                 ('member', [self._id_to_dn(object_id)]),
+                                 self.enabled_emulation_naming_attr]
+                    if self.use_dumb_member:
+                        attr_list[1][1].append(self.dumb_member)
+                    conn.add_s(self.enabled_emulation_dn, attr_list)
+
+    def _remove_enabled(self, object_id):
+        modlist = [(ldap.MOD_DELETE,
+                    'member',
+                    [self._id_to_dn(object_id)])]
+        with self.get_connection() as conn:
+            try:
+                conn.modify_s(self.enabled_emulation_dn, modlist)
+            except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
+                pass
+
+    def create(self, values):
+        if self.enabled_emulation:
+            enabled_value = values.pop('enabled', True)
+            ref = super(EnabledEmuMixIn, self).create(values)
+            if 'enabled' not in self.attribute_ignore:
+                if enabled_value:
+                    self._add_enabled(ref['id'])
+                ref['enabled'] = enabled_value
+            return ref
+        else:
+            return super(EnabledEmuMixIn, self).create(values)
+
+    def get(self, object_id, ldap_filter=None):
+        ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
+        if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
+            ref['enabled'] = self._get_enabled(object_id)
+        return ref
+
+    def get_all(self, ldap_filter=None):
+        if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
+            # had to copy BaseLdap.get_all here to filter out the
+            # enabled-emulation entry by DN
+            tenant_list = [self._ldap_res_to_model(x)
+                           for x in self._ldap_get_all(ldap_filter)
+                           if x[0] != self.enabled_emulation_dn]
+            for tenant_ref in tenant_list:
+                tenant_ref['enabled'] = self._get_enabled(tenant_ref['id'])
+            return tenant_list
+        else:
+            return super(EnabledEmuMixIn, self).get_all(ldap_filter)
+
+    def update(self, object_id, values, old_obj=None):
+        if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
+            data = values.copy()
+            enabled_value = data.pop('enabled', None)
+            ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
+            if enabled_value is not None:
+                if enabled_value:
+                    self._add_enabled(object_id)
+                else:
+                    self._remove_enabled(object_id)
+                ref['enabled'] = enabled_value
+            return ref
+        else:
+            return super(EnabledEmuMixIn, self).update(
+                object_id, values, old_obj)
+
+    def delete(self, object_id):
+        if self.enabled_emulation:
+            self._remove_enabled(object_id)
+        super(EnabledEmuMixIn, self).delete(object_id)
+
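
To make the emulation concrete, the default group DN derived in `__init__` above works out as follows (the tree DN is illustrative):

```python
options_name = 'user'
tree_dn = 'ou=Users,dc=example,dc=com'

enabled_emulation_dn = 'cn=enabled_%ss,%s' % (options_name, tree_dn)
# -> 'cn=enabled_users,ou=Users,dc=example,dc=com'
# An object is considered enabled exactly when its DN appears as a
# 'member' value of this groupOfNames entry.
```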
+
+class ProjectLdapStructureMixin(object):
+    """Project LDAP Structure shared between LDAP backends.
+
+    This is shared between the resource and assignment LDAP backends.
+
+    """
+    DEFAULT_OU = 'ou=Groups'
+    DEFAULT_STRUCTURAL_CLASSES = []
+    DEFAULT_OBJECTCLASS = 'groupOfNames'
+    DEFAULT_ID_ATTR = 'cn'
+    NotFound = exception.ProjectNotFound
+    notfound_arg = 'project_id'  # NOTE(yorik-sar): while options_name = tenant
+    options_name = 'project'
+    attribute_options_names = {'name': 'name',
+                               'description': 'desc',
+                               'enabled': 'enabled',
+                               'domain_id': 'domain_id'}
+    immutable_attrs = ['name']
diff --git a/keystone-moon/keystone/common/manager.py b/keystone-moon/keystone/common/manager.py
new file mode 100644 (file)
index 0000000..28bf2ef
--- /dev/null
@@ -0,0 +1,76 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from oslo_utils import importutils
+
+
+def response_truncated(f):
+    """Truncate the list returned by the wrapped function.
+
+    This is designed to wrap Manager list_{entity} methods to ensure that
+    any list limits that are defined are passed to the driver layer.  If a
+    hints list is provided, the wrapper will insert the relevant limit into
+    the hints so that the underlying driver call can try and honor it. If the
+    driver does truncate the response, it will update the 'truncated' attribute
+    in the 'limit' entry in the hints list, which enables the caller of this
+    function to know if truncation has taken place.  If, however, the driver
+    layer is unable to perform truncation, the 'limit' entry is simply left in
+    the hints list for the caller to handle.
+
+    A _get_list_limit() method is required to be present in the object class
+    hierarchy, which returns the limit for this backend to which we will
+    truncate.
+
+    If a hints list is not provided in the arguments of the wrapped call then
+    any limits set in the config file are ignored.  This allows internal use
+    of such wrapped methods where the entire data set is needed as input for
+    the calculations of some other API (e.g. get role assignments for a given
+    project).
+
+    """
+    @functools.wraps(f)
+    def wrapper(self, *args, **kwargs):
+        if kwargs.get('hints') is None:
+            return f(self, *args, **kwargs)
+
+        list_limit = self.driver._get_list_limit()
+        if list_limit:
+            kwargs['hints'].set_limit(list_limit)
+        return f(self, *args, **kwargs)
+    return wrapper
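
A minimal, self-contained sketch of the decorator in action; the `Fake*` classes and the limit value are stand-ins for a real driver and the keystone hints object, not part of this module:

```python
class FakeHints(object):
    def __init__(self):
        self.limit = None

    def set_limit(self, limit):
        self.limit = {'limit': limit, 'truncated': False}


class FakeDriver(object):
    def _get_list_limit(self):
        return 100  # illustrative config-derived limit


class FakeManager(object):
    driver = FakeDriver()

    @response_truncated
    def list_things(self, hints=None):
        return []  # a real driver would honor hints.limit here


hints = FakeHints()
FakeManager().list_things(hints=hints)
assert hints.limit == {'limit': 100, 'truncated': False}
```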
+
+
+class Manager(object):
+    """Base class for intermediary request layer.
+
+    The Manager layer exists to support additional logic that applies to all
+    or some of the methods exposed by a service that are not specific to the
+    HTTP interface.
+
+    It also provides a stable entry point to dynamic backends.
+
+    An example of a probable use case is logging all the calls.
+
+    """
+
+    def __init__(self, driver_name):
+        self.driver = importutils.import_object(driver_name)
+
+    def __getattr__(self, name):
+        """Forward calls to the underlying driver."""
+        f = getattr(self.driver, name)
+        setattr(self, name, f)
+        return f
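
Usage sketch for the class above (the driver path and method name are illustrative): the first attribute miss triggers `__getattr__`, which resolves the attribute on the driver and caches it on the manager, so later calls bypass `__getattr__` entirely:

```python
manager = Manager('keystone.catalog.backends.sql.Catalog')
regions = manager.list_regions()  # forwarded to the driver, then cached
```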
diff --git a/keystone-moon/keystone/common/models.py b/keystone-moon/keystone/common/models.py
new file mode 100644 (file)
index 0000000..3b3aabe
--- /dev/null
@@ -0,0 +1,182 @@
+# Copyright (C) 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base model for keystone internal services
+
+Unless marked otherwise, all fields are strings.
+
+"""
+
+
+class Model(dict):
+    """Base model class."""
+    def __hash__(self):
+        return self['id'].__hash__()
+
+    @property
+    def known_keys(self):
+        return self.required_keys + self.optional_keys
+
+
+class Token(Model):
+    """Token object.
+
+    Required keys:
+        id
+        expires (datetime)
+
+    Optional keys:
+        extra (dict; may carry user, tenant, metadata, trust_id)
+    """
+
+    required_keys = ('id', 'expires')
+    optional_keys = ('extra',)
+
+
+class Service(Model):
+    """Service object.
+
+    Required keys:
+        id
+        type
+        name
+
+    Optional keys:
+    """
+
+    required_keys = ('id', 'type', 'name')
+    optional_keys = tuple()
+
+
+class Endpoint(Model):
+    """Endpoint object
+
+    Required keys:
+        id
+        region
+        service_id
+
+    Optional keys:
+        internalurl
+        publicurl
+        adminurl
+    """
+
+    required_keys = ('id', 'region', 'service_id')
+    optional_keys = ('internalurl', 'publicurl', 'adminurl')
+
+
+class User(Model):
+    """User object.
+
+    Required keys:
+        id
+        name
+        domain_id
+
+    Optional keys:
+        password
+        description
+        email
+        enabled (bool, default True)
+        default_project_id
+    """
+
+    required_keys = ('id', 'name', 'domain_id')
+    optional_keys = ('password', 'description', 'email', 'enabled',
+                     'default_project_id')
+
+
+class Group(Model):
+    """Group object.
+
+    Required keys:
+        id
+        name
+        domain_id
+
+    Optional keys:
+
+        description
+
+    """
+
+    required_keys = ('id', 'name', 'domain_id')
+    optional_keys = ('description',)
+
+
+class Project(Model):
+    """Project object.
+
+    Required keys:
+        id
+        name
+        domain_id
+
+    Optional Keys:
+        description
+        enabled (bool, default True)
+
+    """
+
+    required_keys = ('id', 'name', 'domain_id')
+    optional_keys = ('description', 'enabled')
+
+
+class Role(Model):
+    """Role object.
+
+    Required keys:
+        id
+        name
+
+    """
+
+    required_keys = ('id', 'name')
+    optional_keys = tuple()
+
+
+class Trust(Model):
+    """Trust object.
+
+    Required keys:
+        id
+        trustor_user_id
+        trustee_user_id
+        project_id
+    """
+
+    required_keys = ('id', 'trustor_user_id', 'trustee_user_id', 'project_id')
+    optional_keys = ('expires_at',)
+
+
+class Domain(Model):
+    """Domain object.
+
+    Required keys:
+        id
+        name
+
+    Optional keys:
+
+        description
+        enabled (bool, default True)
+
+    """
+
+    required_keys = ('id', 'name')
+    optional_keys = ('description', 'enabled')
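
A short usage sketch for these models (values are made up): each model is a dict whose expected keys are declared via `required_keys`/`optional_keys` and surfaced through `known_keys`:

```python
user = User(id='c5e1f88f55d94d68ad0b4b2c0d96b0ae',
            name='alice', domain_id='default', enabled=True)
assert user['name'] == 'alice'
assert 'email' in user.known_keys  # declared optional, though not set here
```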
diff --git a/keystone-moon/keystone/common/openssl.py b/keystone-moon/keystone/common/openssl.py
new file mode 100644 (file)
index 0000000..4eb7d1d
--- /dev/null
@@ -0,0 +1,347 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common import environment
+from keystone.common import utils
+from keystone.i18n import _LI, _LE
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+PUBLIC_DIR_PERMS = 0o755        # -rwxr-xr-x
+PRIVATE_DIR_PERMS = 0o750       # -rwxr-x---
+PUBLIC_FILE_PERMS = 0o644       # -rw-r--r--
+PRIVATE_FILE_PERMS = 0o640      # -rw-r-----
+
+
+def file_exists(file_path):
+    return os.path.exists(file_path)
+
+
+class BaseCertificateConfigure(object):
+    """Create a certificate signing environment.
+
+    This is based on a config section and reasonable OpenSSL defaults.
+
+    """
+
+    def __init__(self, conf_obj, server_conf_obj, keystone_user,
+                 keystone_group, rebuild, **kwargs):
+        self.conf_dir = os.path.dirname(server_conf_obj.ca_certs)
+        self.use_keystone_user = keystone_user
+        self.use_keystone_group = keystone_group
+        self.rebuild = rebuild
+        self.ssl_config_file_name = os.path.join(self.conf_dir, "openssl.conf")
+        self.request_file_name = os.path.join(self.conf_dir, "req.pem")
+        self.ssl_dictionary = {'conf_dir': self.conf_dir,
+                               'ca_cert': server_conf_obj.ca_certs,
+                               'default_md': 'default',
+                               'ssl_config': self.ssl_config_file_name,
+                               'ca_private_key': conf_obj.ca_key,
+                               'request_file': self.request_file_name,
+                               'signing_key': server_conf_obj.keyfile,
+                               'signing_cert': server_conf_obj.certfile,
+                               'key_size': int(conf_obj.key_size),
+                               'valid_days': int(conf_obj.valid_days),
+                               'cert_subject': conf_obj.cert_subject}
+
+        try:
+            # OpenSSL 1.0 and newer support default_md = default; older versions do not
+            openssl_ver = environment.subprocess.Popen(
+                ['openssl', 'version'],
+                stdout=environment.subprocess.PIPE).stdout.read()
+            if "OpenSSL 0." in openssl_ver:
+                self.ssl_dictionary['default_md'] = 'sha1'
+        except OSError:
+            LOG.warn('Failed to invoke ``openssl version``, '
+                     'assuming it is v1.0 or newer')
+        self.ssl_dictionary.update(kwargs)
+
+    def exec_command(self, command):
+        to_exec = []
+        for cmd_part in command:
+            to_exec.append(cmd_part % self.ssl_dictionary)
+        LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
+        # NOTE(Jeffrey4l): Redirect both stdout and stderr to pipe, so the
+        # output can be captured.
+        # NOTE(Jeffrey4l): check_output is not compatible with Python 2.6.
+        # So use Popen instead.
+        process = environment.subprocess.Popen(
+            to_exec,
+            stdout=environment.subprocess.PIPE,
+            stderr=environment.subprocess.STDOUT)
+        output = process.communicate()[0]
+        retcode = process.poll()
+        if retcode:
+            LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s'
+                          '- %(output)s'),
+                      {'to_exec': to_exec,
+                       'retcode': retcode,
+                       'output': output})
+            e = environment.subprocess.CalledProcessError(retcode, to_exec[0])
+            # NOTE(Jeffrey4l): Python 2.6 compatibility:
+            # CalledProcessError did not have output keyword argument
+            e.output = output
+            raise e
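
A worked example of the %-interpolation that `exec_command` performs on each command element (paths and sizes are illustrative):

```python
ssl_dictionary = {'ca_private_key': '/etc/keystone/ssl/private/cakey.pem',
                  'key_size': 2048}
command = ['openssl', 'genrsa', '-out', '%(ca_private_key)s',
           '%(key_size)d']

to_exec = [part % ssl_dictionary for part in command]
# -> ['openssl', 'genrsa', '-out',
#     '/etc/keystone/ssl/private/cakey.pem', '2048']
```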
+
+    def clean_up_existing_files(self):
+        files_to_clean = [self.ssl_dictionary['ca_private_key'],
+                          self.ssl_dictionary['ca_cert'],
+                          self.ssl_dictionary['signing_key'],
+                          self.ssl_dictionary['signing_cert'],
+                          ]
+
+        existing_files = []
+
+        for file_path in files_to_clean:
+            if file_exists(file_path):
+                if self.rebuild:
+                    # The file exists but the user wants to rebuild it, so blow
+                    # it away
+                    try:
+                        os.remove(file_path)
+                    except OSError as exc:
+                        LOG.error(_LE('Failed to remove file %(file_path)r: '
+                                      '%(error)s'),
+                                  {'file_path': file_path,
+                                   'error': exc.strerror})
+                        raise
+                else:
+                    existing_files.append(file_path)
+
+        return existing_files
+
+    def build_ssl_config_file(self):
+        utils.make_dirs(os.path.dirname(self.ssl_config_file_name),
+                        mode=PUBLIC_DIR_PERMS,
+                        user=self.use_keystone_user,
+                        group=self.use_keystone_group, log=LOG)
+        if not file_exists(self.ssl_config_file_name):
+            ssl_config_file = open(self.ssl_config_file_name, 'w')
+            ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
+            ssl_config_file.close()
+        utils.set_permissions(self.ssl_config_file_name,
+                              mode=PRIVATE_FILE_PERMS,
+                              user=self.use_keystone_user,
+                              group=self.use_keystone_group, log=LOG)
+
+        index_file_name = os.path.join(self.conf_dir, 'index.txt')
+        if not file_exists(index_file_name):
+            index_file = open(index_file_name, 'w')
+            index_file.write('')
+            index_file.close()
+        utils.set_permissions(index_file_name,
+                              mode=PRIVATE_FILE_PERMS,
+                              user=self.use_keystone_user,
+                              group=self.use_keystone_group, log=LOG)
+
+        serial_file_name = os.path.join(self.conf_dir, 'serial')
+        if not file_exists(serial_file_name):
+            index_file = open(serial_file_name, 'w')
+            index_file.write('01')
+            index_file.close()
+        utils.set_permissions(serial_file_name,
+                              mode=PRIVATE_FILE_PERMS,
+                              user=self.use_keystone_user,
+                              group=self.use_keystone_group, log=LOG)
+
+    def build_ca_cert(self):
+        ca_key_file = self.ssl_dictionary['ca_private_key']
+        utils.make_dirs(os.path.dirname(ca_key_file),
+                        mode=PRIVATE_DIR_PERMS,
+                        user=self.use_keystone_user,
+                        group=self.use_keystone_group, log=LOG)
+        if not file_exists(ca_key_file):
+            self.exec_command(['openssl', 'genrsa',
+                               '-out', '%(ca_private_key)s',
+                               '%(key_size)d'])
+        utils.set_permissions(ca_key_file,
+                              mode=PRIVATE_FILE_PERMS,
+                              user=self.use_keystone_user,
+                              group=self.use_keystone_group, log=LOG)
+
+        ca_cert = self.ssl_dictionary['ca_cert']
+        utils.make_dirs(os.path.dirname(ca_cert),
+                        mode=PUBLIC_DIR_PERMS,
+                        user=self.use_keystone_user,
+                        group=self.use_keystone_group, log=LOG)
+        if not file_exists(ca_cert):
+            self.exec_command(['openssl', 'req', '-new', '-x509',
+                               '-extensions', 'v3_ca',
+                               '-key', '%(ca_private_key)s',
+                               '-out', '%(ca_cert)s',
+                               '-days', '%(valid_days)d',
+                               '-config', '%(ssl_config)s',
+                               '-subj', '%(cert_subject)s'])
+        utils.set_permissions(ca_cert,
+                              mode=PUBLIC_FILE_PERMS,
+                              user=self.use_keystone_user,
+                              group=self.use_keystone_group, log=LOG)
+
+    def build_private_key(self):
+        signing_keyfile = self.ssl_dictionary['signing_key']
+        utils.make_dirs(os.path.dirname(signing_keyfile),
+                        mode=PRIVATE_DIR_PERMS,
+                        user=self.use_keystone_user,
+                        group=self.use_keystone_group, log=LOG)
+        if not file_exists(signing_keyfile):
+            self.exec_command(['openssl', 'genrsa', '-out', '%(signing_key)s',
+                               '%(key_size)d'])
+        utils.set_permissions(signing_keyfile,
+                              mode=PRIVATE_FILE_PERMS,
+                              user=self.use_keystone_user,
+                              group=self.use_keystone_group, log=LOG)
+
+    def build_signing_cert(self):
+        signing_cert = self.ssl_dictionary['signing_cert']
+
+        utils.make_dirs(os.path.dirname(signing_cert),
+                        mode=PUBLIC_DIR_PERMS,
+                        user=self.use_keystone_user,
+                        group=self.use_keystone_group, log=LOG)
+        if not file_exists(signing_cert):
+            self.exec_command(['openssl', 'req', '-key', '%(signing_key)s',
+                               '-new', '-out', '%(request_file)s',
+                               '-config', '%(ssl_config)s',
+                               '-subj', '%(cert_subject)s'])
+
+            self.exec_command(['openssl', 'ca', '-batch',
+                               '-out', '%(signing_cert)s',
+                               '-config', '%(ssl_config)s',
+                               '-days', '%(valid_days)d',
+                               '-cert', '%(ca_cert)s',
+                               '-keyfile', '%(ca_private_key)s',
+                               '-infiles', '%(request_file)s'])
+
+    def run(self):
+        try:
+            existing_files = self.clean_up_existing_files()
+        except OSError:
+            print('An error occurred when rebuilding cert files.')
+            return
+        if existing_files:
+            print('The following cert files already exist, use --rebuild to '
+                  'remove the existing files before regenerating:')
+            for f in existing_files:
+                print('%s already exists' % f)
+            return
+
+        self.build_ssl_config_file()
+        self.build_ca_cert()
+        self.build_private_key()
+        self.build_signing_cert()
+
+
+class ConfigurePKI(BaseCertificateConfigure):
+    """Generate files for PKI signing using OpenSSL.
+
+    Signed tokens require a private key and signing certificate which itself
+    must be signed by a CA.  This class generates them with workable defaults
+    if any of the files is not present.
+
+    """
+
+    def __init__(self, keystone_user, keystone_group, rebuild=False):
+        super(ConfigurePKI, self).__init__(CONF.signing, CONF.signing,
+                                           keystone_user, keystone_group,
+                                           rebuild=rebuild)
+
+
+class ConfigureSSL(BaseCertificateConfigure):
+    """Generate files for HTTPS using OpenSSL.
+
+    Creates a public/private key and certificates. If a CA is not given,
+    one will be generated using the provided arguments.
+    """
+
+    def __init__(self, keystone_user, keystone_group, rebuild=False):
+        super(ConfigureSSL, self).__init__(CONF.ssl, CONF.eventlet_server_ssl,
+                                           keystone_user, keystone_group,
+                                           rebuild=rebuild)
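
Usage sketch, roughly how `keystone-manage pki_setup`-style tooling would drive the classes above (the user and group names are illustrative):

```python
ConfigurePKI(keystone_user='keystone',
             keystone_group='keystone',
             rebuild=False).run()
```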
+
+
+BaseCertificateConfigure.sslconfig = """
+# OpenSSL configuration file.
+#
+
+# Establish working directory.
+
+dir            = %(conf_dir)s
+
+[ ca ]
+default_ca        = CA_default
+
+[ CA_default ]
+new_certs_dir     = $dir
+serial            = $dir/serial
+database          = $dir/index.txt
+default_days      = 365
+default_md        = %(default_md)s
+preserve          = no
+email_in_dn       = no
+nameopt           = default_ca
+certopt           = default_ca
+policy            = policy_anything
+x509_extensions   = usr_cert
+unique_subject    = no
+
+[ policy_anything ]
+countryName             = optional
+stateOrProvinceName     = optional
+organizationName        = optional
+organizationalUnitName  = optional
+commonName              = supplied
+emailAddress            = optional
+
+[ req ]
+default_bits       = 2048 # Size of keys
+default_keyfile    = key.pem # name of generated keys
+string_mask        = utf8only # permitted characters
+distinguished_name = req_distinguished_name
+req_extensions     = v3_req
+x509_extensions = v3_ca
+
+[ req_distinguished_name ]
+countryName                 = Country Name (2 letter code)
+countryName_min             = 2
+countryName_max             = 2
+stateOrProvinceName         = State or Province Name (full name)
+localityName                = Locality Name (city, district)
+0.organizationName          = Organization Name (company)
+organizationalUnitName      = Organizational Unit Name (department, division)
+commonName                  = Common Name (hostname, IP, or your name)
+commonName_max              = 64
+emailAddress                = Email Address
+emailAddress_max            = 64
+
+[ v3_ca ]
+basicConstraints       = CA:TRUE
+subjectKeyIdentifier   = hash
+authorityKeyIdentifier = keyid:always,issuer
+
+[ v3_req ]
+basicConstraints     = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+[ usr_cert ]
+basicConstraints       = CA:FALSE
+subjectKeyIdentifier   = hash
+authorityKeyIdentifier = keyid:always
+"""
diff --git a/keystone-moon/keystone/common/pemutils.py b/keystone-moon/keystone/common/pemutils.py
new file mode 100755 (executable)
index 0000000..ddbe05c
--- /dev/null
@@ -0,0 +1,509 @@
+# Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+"""
+PEM formatted data is used frequently in conjunction with X509 PKI as
+a data exchange mechanism for binary data. The acronym PEM stands for
+Privacy Enhanced Mail as defined in RFC-1421. Contrary to expectation,
+the PEM format in common use has little to do with RFC-1421. Instead,
+what we know as PEM format grew out of the need for a data exchange
+mechanism, largely through the influence of OpenSSL. Other X509
+implementations have adopted it.
+
+Unfortunately PEM format has never been officially standardized. Its
+basic format is as follows:
+
+1) A header consisting of 5 hyphens followed by the word BEGIN and a
+single space, then an upper case string describing the contents of the
+PEM block; this is followed by 5 hyphens and a newline.
+
+2) Binary data (typically in DER ASN.1 format) encoded in base64. The
+base64 text is line wrapped so that each line of base64 is 64
+characters long and terminated with a newline. The last line of base64
+text may be less than 64 characters. The content and format of the
+binary data is entirely dependent upon the type of data announced in
+the header and footer.
+
+3) A footer in the exact same format as the header, except the word
+BEGIN is replaced by END. The content name in the header and footer
+should exactly match.
+
+The above is called a PEM block. It is permissible for multiple PEM
+blocks to appear in a single file or block of text. This is often used
+when specifying multiple X509 certificates.
+
+An example PEM block for a certificate is:
+
+-----BEGIN CERTIFICATE-----
+MIIC0TCCAjqgAwIBAgIJANsHKV73HYOwMA0GCSqGSIb3DQEBBQUAMIGeMQowCAYD
+VQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55
+dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMG
+CSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2Vs
+ZiBTaWduZWQwIBcNMTIxMTA1MTgxODI0WhgPMjA3MTA0MzAxODE4MjRaMIGeMQow
+CAYDVQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1
+bm55dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTEl
+MCMGCSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxML
+U2VsZiBTaWduZWQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALzI17ExCaqd
+r7xY2Q5CBZ1bW1lsrXxS8eNJRdQtskDuQVAluY03/OGZd8HQYiiY/ci2tYy7BNIC
+bh5GaO95eqTDykJR3liOYE/tHbY6puQlj2ZivmhlSd2d5d7lF0/H28RQsLu9VktM
+uw6q9DpDm35jfrr8LgSeA3MdVqcS/4OhAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMB
+Af8wDQYJKoZIhvcNAQEFBQADgYEAjSQND7i1dNZtLKpWgX+JqMr3BdVlM15mFeVr
+C26ZspZjZVY5okdozO9gU3xcwRe4Cg30sKFOe6EBQKpkTZucFOXwBtD3h6dWJrdD
+c+m/CL/rs0GatDavbaIT2vv405SQUQooCdVh72LYel+4/a6xmRd7fQx3iEXN9QYj
+vmHJUcA=
+-----END CERTIFICATE-----
+
+PEM format is safe for transmission in 7-bit ASCII systems
+(i.e. standard email). Since 7-bit ASCII is a proper subset of UTF-8
+and Latin-1 it is not affected by transcoding between those
+charsets. Nor is PEM format affected by the choice of line
+endings. This makes PEM format particularly attractive for transport
+and storage of binary data.
+
+This module provides a number of utilities supporting the generation
+and consumption of PEM formatted data including:
+
+    * parse text and find all PEM blocks contained in the
+      text. Information is returned on the location of the block in the
+      text, the type of PEM block, and its base64 and binary data contents.
+
+    * parse text assumed to contain PEM data and return the binary
+      data.
+
+    * test if a block of text is a PEM block
+
+    * convert base64 text into a formatted PEM block
+
+    * convert binary data into a formatted PEM block
+
+    * access to the valid PEM types and their headers
+
+"""
+
+import base64
+import re
+
+import six
+
+from keystone.common import base64utils
+from keystone.i18n import _
+
+
+PEM_TYPE_TO_HEADER = {
+    u'cms': u'CMS',
+    u'dsa-private': u'DSA PRIVATE KEY',
+    u'dsa-public': u'DSA PUBLIC KEY',
+    u'ecdsa-public': u'ECDSA PUBLIC KEY',
+    u'ec-private': u'EC PRIVATE KEY',
+    u'pkcs7': u'PKCS7',
+    u'pkcs7-signed': u'PKCS',
+    u'pkcs8': u'ENCRYPTED PRIVATE KEY',
+    u'private-key': u'PRIVATE KEY',
+    u'public-key': u'PUBLIC KEY',
+    u'rsa-private': u'RSA PRIVATE KEY',
+    u'rsa-public': u'RSA PUBLIC KEY',
+    u'cert': u'CERTIFICATE',
+    u'crl': u'X509 CRL',
+    u'cert-pair': u'CERTIFICATE PAIR',
+    u'csr': u'CERTIFICATE REQUEST',
+}
+
+# This is not a 1-to-1 reverse map of PEM_TYPE_TO_HEADER
+# because it includes deprecated headers that map to 1 pem_type.
+PEM_HEADER_TO_TYPE = {
+    u'CMS': u'cms',
+    u'DSA PRIVATE KEY': u'dsa-private',
+    u'DSA PUBLIC KEY': u'dsa-public',
+    u'ECDSA PUBLIC KEY': u'ecdsa-public',
+    u'EC PRIVATE KEY': u'ec-private',
+    u'PKCS7': u'pkcs7',
+    u'PKCS': u'pkcs7-signed',
+    u'ENCRYPTED PRIVATE KEY': u'pkcs8',
+    u'PRIVATE KEY': u'private-key',
+    u'PUBLIC KEY': u'public-key',
+    u'RSA PRIVATE KEY': u'rsa-private',
+    u'RSA PUBLIC KEY': u'rsa-public',
+    u'CERTIFICATE': u'cert',
+    u'X509 CERTIFICATE': u'cert',
+    u'CERTIFICATE PAIR': u'cert-pair',
+    u'X509 CRL': u'crl',
+    u'CERTIFICATE REQUEST': u'csr',
+    u'NEW CERTIFICATE REQUEST': u'csr',
+}
+
+# List of valid pem_types
+pem_types = sorted(PEM_TYPE_TO_HEADER.keys())
+
+# List of valid pem_headers
+pem_headers = sorted(PEM_TYPE_TO_HEADER.values())
+
+_pem_begin_re = re.compile(r'^-{5}BEGIN\s+([^-]+)-{5}\s*$', re.MULTILINE)
+_pem_end_re = re.compile(r'^-{5}END\s+([^-]+)-{5}\s*$', re.MULTILINE)
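
A quick illustration of the header regex above (the sample block is illustrative, with shortened base64 content):

```python
sample = ('-----BEGIN CERTIFICATE-----\n'
          'TUlJQzBUQ0NBanFnQXdJQkFnSUpBTnNIS1Y3M0hZT3c=\n'
          '-----END CERTIFICATE-----\n')

m = _pem_begin_re.search(sample)
assert m.group(1).strip() == 'CERTIFICATE'
```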
+
+
+class PEMParseResult(object):
+    """Information returned when a PEM block is found in text.
+
+    PEMParseResult contains information about a PEM block discovered
+    while parsing text. The following properties are defined:
+
+    pem_type
+        A shorthand name for the type of the PEM data, e.g. cert,
+        csr, crl, cms, key. Valid pem_types are listed in pem_types.
+        When the pem_type is set the pem_header is updated to match it.
+
+    pem_header
+        The text following '-----BEGIN ' in the PEM header.
+        Common examples are:
+
+            -----BEGIN CERTIFICATE-----
+            -----BEGIN CMS-----
+
+        Thus the pem_header would be CERTIFICATE and CMS respectively.
+        When the pem_header is set the pem_type is updated to match it.
+
+    pem_start, pem_end
+        The beginning and ending positions of the PEM block
+        including the PEM header and footer.
+
+    base64_start, base64_end
+        The beginning and ending positions of the base64 data
+        contained inside the PEM header and footer. Includes the trailing
+        newline.
+
+    binary_data
+        The decoded base64 data. None if not decoded.
+
+    """
+
+    def __init__(self, pem_type=None, pem_header=None,
+                 pem_start=None, pem_end=None,
+                 base64_start=None, base64_end=None,
+                 binary_data=None):
+
+        self._pem_type = None
+        self._pem_header = None
+
+        if pem_type is not None:
+            self.pem_type = pem_type
+
+        if pem_header is not None:
+            self.pem_header = pem_header
+
+        self.pem_start = pem_start
+        self.pem_end = pem_end
+        self.base64_start = base64_start
+        self.base64_end = base64_end
+        self.binary_data = binary_data
+
+    @property
+    def pem_type(self):
+        return self._pem_type
+
+    @pem_type.setter
+    def pem_type(self, pem_type):
+        if pem_type is None:
+            self._pem_type = None
+            self._pem_header = None
+        else:
+            pem_header = PEM_TYPE_TO_HEADER.get(pem_type)
+            if pem_header is None:
+                raise ValueError(_('unknown pem_type "%(pem_type)s", '
+                                   'valid types are: %(valid_pem_types)s') %
+                                 {'pem_type': pem_type,
+                                  'valid_pem_types': ', '.join(pem_types)})
+            self._pem_type = pem_type
+            self._pem_header = pem_header
+
+    @property
+    def pem_header(self):
+        return self._pem_header
+
+    @pem_header.setter
+    def pem_header(self, pem_header):
+        if pem_header is None:
+            self._pem_type = None
+            self._pem_header = None
+        else:
+            pem_type = PEM_HEADER_TO_TYPE.get(pem_header)
+            if pem_type is None:
+                raise ValueError(_('unknown pem header "%(pem_header)s", '
+                                   'valid headers are: '
+                                   '%(valid_pem_headers)s') %
+                                 {'pem_header': pem_header,
+                                  'valid_pem_headers':
+                                  ', '.join("'%s'" % x
+                                            for x in pem_headers)})
+
+            self._pem_type = pem_type
+            self._pem_header = pem_header
+
+
+def pem_search(text, start=0):
+    """Search for a block of PEM formatted data
+
+    Search for a PEM block in a text string. The search begins at
+    start. If a PEM block is found a PEMParseResult object is
+    returned, otherwise if no PEM block is found None is returned.
+
+    If the pem_type is not the same in both the header and footer
+    a ValueError is raised.
+
+    The start and end positions are suitable for use as slices into
+    the text. To search for multiple PEM blocks pass pem_end as the
+    start position for the next iteration. Terminate the iteration
+    when None is returned. Example::
+
+        start = 0
+        while True:
+            block = pem_search(text, start)
+            if block is None:
+                break
+            base64_data = text[block.base64_start : block.base64_end]
+            start = block.pem_end
+
+    :param text: the text to search for PEM blocks
+    :type text: string
+    :param start: the position in text to start searching from (default: 0)
+    :type start: int
+    :returns: PEMParseResult or None if not found
+    :raises: ValueError
+    """
+
+    match = _pem_begin_re.search(text, pos=start)
+    if match:
+        pem_start = match.start()
+        begin_text = match.group(0)
+        base64_start = min(len(text), match.end() + 1)
+        begin_pem_header = match.group(1).strip()
+
+        match = _pem_end_re.search(text, pos=base64_start)
+        if match:
+            pem_end = min(len(text), match.end() + 1)
+            base64_end = match.start()
+            end_pem_header = match.group(1).strip()
+        else:
+            raise ValueError(_('failed to find end matching "%s"') %
+                             begin_text)
+
+        if begin_pem_header != end_pem_header:
+            raise ValueError(_('beginning & end PEM headers do not match '
+                               '(%(begin_pem_header)s'
+                               '!= '
+                               '%(end_pem_header)s)') %
+                             {'begin_pem_header': begin_pem_header,
+                              'end_pem_header': end_pem_header})
+    else:
+        return None
+
+    result = PEMParseResult(pem_header=begin_pem_header,
+                            pem_start=pem_start, pem_end=pem_end,
+                            base64_start=base64_start, base64_end=base64_end)
+
+    return result
+
+
+def parse_pem(text, pem_type=None, max_items=None):
+    """Scan text for PEM data, return list of PEM items
+
+    The input text is scanned for PEM blocks, for each one found a
+    PEMParseResult is constructed and added to the return list.
+
+    pem_type operates as a filter on the type of PEM desired. If
+    pem_type is specified, only those PEM blocks which match will be
+    included. The pem_type is a logical name, not the actual text in
+    the pem header (e.g. 'cert'). If the pem_type is None, all PEM
+    blocks are returned.
+
+    If max_items is specified, the result is limited to that number of
+    items.
+
+    The return value is a list of PEMParseResult objects.  The
+    PEMParseResult provides complete information about the PEM block
+    including the decoded binary data for the PEM block.  The list is
+    ordered in the same order as found in the text.
+
+    Examples::
+
+        # Get all certs
+        certs = parse_pem(text, 'cert')
+
+        # Get the first cert
+        try:
+            binary_cert = parse_pem(text, 'cert', 1)[0].binary_data
+        except IndexError:
+            raise ValueError('no cert found')
+
+    :param text: The text to search for PEM blocks
+    :type text: string
+    :param pem_type: Only return data for this pem_type.
+                     Valid types are: csr, cert, crl, cms, key.
+                     If pem_type is None no filtering is performed.
+                     (default: None)
+    :type pem_type: string or None
+    :param max_items: Limit the number of blocks returned. (default: None)
+    :type max_items: int or None
+    :return: List of PEMParseResult, one for each PEM block found
+    :raises: ValueError, InvalidBase64Error
+    """
+
+    # Validate the requested pem_type up front: the per-block comparison
+    # below cannot raise KeyError, so an unknown pem_type would otherwise
+    # pass silently instead of raising ValueError as documented.
+    if pem_type is not None and pem_type not in PEM_TYPE_TO_HEADER:
+        raise ValueError(_('unknown pem_type: "%s"') % pem_type)
+
+    pem_blocks = []
+    start = 0
+
+    while True:
+        block = pem_search(text, start)
+        if block is None:
+            break
+        start = block.pem_end
+        if pem_type is None or block.pem_type == pem_type:
+            pem_blocks.append(block)
+
+        if max_items is not None and len(pem_blocks) >= max_items:
+            break
+
+    for block in pem_blocks:
+        base64_data = text[block.base64_start:block.base64_end]
+        try:
+            binary_data = base64.b64decode(base64_data)
+        except Exception as e:
+            block.binary_data = None
+            raise base64utils.InvalidBase64Error(
+                _('failed to base64 decode %(pem_type)s PEM at position '
+                  '%(position)d: %(err_msg)s') %
+                {'pem_type': block.pem_type,
+                 'position': block.pem_start,
+                 'err_msg': six.text_type(e)})
+        else:
+            block.binary_data = binary_data
+
+    return pem_blocks
+
+
+def get_pem_data(text, pem_type='cert'):
+    """Scan text for PEM data, return binary contents
+
+    The input text is scanned for a PEM block which matches the pem_type.
+    If found, the binary data contained in the PEM block is returned.
+    If no PEM block is found, or it does not match the specified pem_type,
+    None is returned.
+
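+    Example (illustrative)::
+
+        der_data = get_pem_data(cert_text, 'cert')
+        if der_data is None:
+            raise ValueError('no certificate found')
+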
+    :param text: The text to search for the PEM block
+    :type text: string
+    :param pem_type: Only return data for this pem_type.
+                     Valid types are: csr, cert, crl, cms, key.
+                     (default: 'cert')
+    :type pem_type: string
+    :return: binary data or None if not found.
+    """
+
+    blocks = parse_pem(text, pem_type, 1)
+    if not blocks:
+        return None
+    return blocks[0].binary_data
+
+
+def is_pem(text, pem_type='cert'):
+    """Does this text contain a PEM block.
+
+    Check for the existence of a PEM formatted block in the
+    text, if one is found verify it's contents can be base64
+    decoded, if so return True. Return False otherwise.
+
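+    Example (illustrative)::
+
+        if is_pem(text, pem_type='csr'):
+            csr_der = get_pem_data(text, pem_type='csr')
+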
+    :param text: The text to search for PEM blocks
+    :type text: string
+    :param pem_type: Only return data for this pem_type.
+                     Valid types are: csr, cert, crl, cms, key.
+                     (default: 'cert')
+    :type pem_type: string
+    :returns: bool -- True if text contains PEM matching the pem_type,
+              False otherwise.
+    """
+
+    try:
+        pem_blocks = parse_pem(text, pem_type, max_items=1)
+    except base64utils.InvalidBase64Error:
+        return False
+
+    return bool(pem_blocks)
+
+
+def base64_to_pem(base64_text, pem_type='cert'):
+    """Format string of base64 text into PEM format
+
+    Input is assumed to consist only of members of the base64 alphabet
+    (i.e. no whitespace). Use one of the filter functions from
+    base64utils to ensure the input is clean
+    (e.g. strip_whitespace()).
+
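+    Example (illustrative; strip_whitespace() is one of the base64utils
+    filter functions mentioned above)::
+
+        pem = base64_to_pem(base64utils.strip_whitespace(b64_text), 'cert')
+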
+    :param base64_text: text containing ONLY base64 alphabet
+                        characters to be inserted into PEM output.
+    :type base64_text: string
+    :param pem_type: Produce a PEM block for this type.
+                     Valid types are: csr, cert, crl, cms, key.
+                     (default: 'cert')
+    :type pem_type: string
+    :returns: string -- PEM formatted text
+    """
+    pem_header = PEM_TYPE_TO_HEADER[pem_type]
+    buf = six.StringIO()
+
+    buf.write(u'-----BEGIN %s-----' % pem_header)
+    buf.write(u'\n')
+
+    for line in base64utils.base64_wrap_iter(base64_text, width=64):
+        buf.write(line)
+        buf.write(u'\n')
+
+    buf.write(u'-----END %s-----' % pem_header)
+    buf.write(u'\n')
+
+    text = buf.getvalue()
+    buf.close()
+    return text
+
+
+def binary_to_pem(binary_data, pem_type='cert'):
+    """Format binary data into PEM format
+
+    Example::
+
+        # get the certificate binary data in DER format
+        der_data = certificate.der
+        # convert the DER binary data into a PEM
+        pem = binary_to_pem(der_data, 'cert')
+
+
+    :param binary_data: binary data to encapsulate into PEM
+    :type binary_data: buffer
+    :param pem_type: Produce a PEM block for this type.
+                     Valid types are: csr, cert, crl, cms, key.
+                     (default: 'cert')
+    :type pem_type: string
+    :returns: string -- PEM formatted text
+
+    """
+    base64_text = base64.b64encode(binary_data)
+    return base64_to_pem(base64_text, pem_type)
diff --git a/keystone-moon/keystone/common/router.py b/keystone-moon/keystone/common/router.py
new file mode 100644 (file)
index 0000000..ce4e834
--- /dev/null
@@ -0,0 +1,80 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import json_home
+from keystone.common import wsgi
+
+
+class Router(wsgi.ComposableRouter):
+    def __init__(self, controller, collection_key, key,
+                 resource_descriptions=None,
+                 is_entity_implemented=True):
+        self.controller = controller
+        self.key = key
+        self.collection_key = collection_key
+        self._resource_descriptions = resource_descriptions
+        self._is_entity_implemented = is_entity_implemented
+
+    def add_routes(self, mapper):
+        collection_path = '/%(collection_key)s' % {
+            'collection_key': self.collection_key}
+        entity_path = '/%(collection_key)s/{%(key)s_id}' % {
+            'collection_key': self.collection_key,
+            'key': self.key}
+
+        mapper.connect(
+            collection_path,
+            controller=self.controller,
+            action='create_%s' % self.key,
+            conditions=dict(method=['POST']))
+        mapper.connect(
+            collection_path,
+            controller=self.controller,
+            action='list_%s' % self.collection_key,
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            entity_path,
+            controller=self.controller,
+            action='get_%s' % self.key,
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            entity_path,
+            controller=self.controller,
+            action='update_%s' % self.key,
+            conditions=dict(method=['PATCH']))
+        mapper.connect(
+            entity_path,
+            controller=self.controller,
+            action='delete_%s' % self.key,
+            conditions=dict(method=['DELETE']))
+
+        # Add the collection resource and entity resource to the resource
+        # descriptions.
+
+        collection_rel = json_home.build_v3_resource_relation(
+            self.collection_key)
+        rel_data = {'href': collection_path, }
+        self._resource_descriptions.append((collection_rel, rel_data))
+
+        if self._is_entity_implemented:
+            entity_rel = json_home.build_v3_resource_relation(self.key)
+            id_str = '%s_id' % self.key
+            id_param_rel = json_home.build_v3_parameter_relation(id_str)
+            entity_rel_data = {
+                'href-template': entity_path,
+                'href-vars': {
+                    id_str: id_param_rel,
+                },
+            }
+            self._resource_descriptions.append((entity_rel, entity_rel_data))
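+
+# A minimal usage sketch (hypothetical controller and keys):
+#
+#     router = Router(controller, collection_key='users', key='user',
+#                     resource_descriptions=[])
+#     router.add_routes(mapper)
+#
+# This wires POST/GET /users and GET/PATCH/DELETE /users/{user_id} to
+# the controller's create_user, list_users, get_user, update_user and
+# delete_user actions.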
diff --git a/keystone-moon/keystone/common/sql/__init__.py b/keystone-moon/keystone/common/sql/__init__.py
new file mode 100644 (file)
index 0000000..84e0fb8
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common.sql.core import *  # noqa
diff --git a/keystone-moon/keystone/common/sql/core.py b/keystone-moon/keystone/common/sql/core.py
new file mode 100644 (file)
index 0000000..bf16870
--- /dev/null
@@ -0,0 +1,431 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""SQL backends for the various services.
+
+Before using this module, call initialize(). This has to be done before
+CONF() because it sets up configuration options.
+
+"""
+import contextlib
+import functools
+
+from oslo_config import cfg
+from oslo_db import exception as db_exception
+from oslo_db import options as db_options
+from oslo_db.sqlalchemy import models
+from oslo_db.sqlalchemy import session as db_session
+from oslo_log import log
+from oslo_serialization import jsonutils
+import six
+import sqlalchemy as sql
+from sqlalchemy.ext import declarative
+from sqlalchemy.orm.attributes import flag_modified, InstrumentedAttribute
+from sqlalchemy import types as sql_types
+
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+ModelBase = declarative.declarative_base()
+
+
+# For exporting to other modules
+Column = sql.Column
+Index = sql.Index
+String = sql.String
+Integer = sql.Integer
+Enum = sql.Enum
+ForeignKey = sql.ForeignKey
+DateTime = sql.DateTime
+IntegrityError = sql.exc.IntegrityError
+DBDuplicateEntry = db_exception.DBDuplicateEntry
+OperationalError = sql.exc.OperationalError
+NotFound = sql.orm.exc.NoResultFound
+Boolean = sql.Boolean
+Text = sql.Text
+UniqueConstraint = sql.UniqueConstraint
+PrimaryKeyConstraint = sql.PrimaryKeyConstraint
+joinedload = sql.orm.joinedload
+# Suppress flake8's unused import warning for flag_modified:
+flag_modified = flag_modified
+
+
+def initialize():
+    """Initialize the module."""
+
+    db_options.set_defaults(
+        CONF,
+        connection="sqlite:///keystone.db")
+
+
+def initialize_decorator(init):
+    """Ensure that the length of string field do not exceed the limit.
+
+    This decorator check the initialize arguments, to make sure the
+    length of string field do not exceed the length limit, or raise a
+    'StringLengthExceeded' exception.
+
+    Use decorator instead of inheritance, because the metaclass will
+    check the __tablename__, primary key columns, etc. at the class
+    definition.
+
+    """
+    def initialize(self, *args, **kwargs):
+        cls = type(self)
+        for k, v in kwargs.items():
+            if hasattr(cls, k):
+                attr = getattr(cls, k)
+                if isinstance(attr, InstrumentedAttribute):
+                    column = attr.property.columns[0]
+                    if isinstance(column.type, String):
+                        if not isinstance(v, six.text_type):
+                            v = six.text_type(v)
+                        if column.type.length and column.type.length < len(v):
+                            raise exception.StringLengthExceeded(
+                                string=v, type=k, length=column.type.length)
+
+        init(self, *args, **kwargs)
+    return initialize
+
+ModelBase.__init__ = initialize_decorator(ModelBase.__init__)
+
+
+# Special Fields
+class JsonBlob(sql_types.TypeDecorator):
+
+    impl = sql.Text
+
+    def process_bind_param(self, value, dialect):
+        return jsonutils.dumps(value)
+
+    def process_result_value(self, value, dialect):
+        return jsonutils.loads(value)
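+
+# Example (illustrative): a column declared as
+#
+#     Column('blob', JsonBlob())
+#
+# stores dicts/lists as JSON text and returns them as Python objects
+# when read back.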
+
+
+class DictBase(models.ModelBase):
+    attributes = []
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+
+        new_d['extra'] = {k: new_d.pop(k) for k in six.iterkeys(d)
+                          if k not in cls.attributes and k != 'extra'}
+
+        return cls(**new_d)
+
+    def to_dict(self, include_extra_dict=False):
+        """Returns the model's attributes as a dictionary.
+
+        If include_extra_dict is True, 'extra' attributes are literally
+        included in the resulting dictionary twice, for backwards-compatibility
+        with a broken implementation.
+
+        """
+        d = self.extra.copy()
+        for attr in self.__class__.attributes:
+            d[attr] = getattr(self, attr)
+
+        if include_extra_dict:
+            d['extra'] = self.extra.copy()
+
+        return d
+
+    def __getitem__(self, key):
+        if key in self.extra:
+            return self.extra[key]
+        return getattr(self, key)
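+
+# Example (hypothetical model with attributes = ['id', 'name']):
+# unrecognized keys round-trip through the 'extra' JSON blob:
+#
+#     ref = User.from_dict({'id': 'u1', 'name': 'n', 'color': 'blue'})
+#     ref.extra               # {'color': 'blue'}
+#     ref.to_dict()['color']  # 'blue'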
+
+
+class ModelDictMixin(object):
+
+    @classmethod
+    def from_dict(cls, d):
+        """Returns a model instance from a dictionary."""
+        return cls(**d)
+
+    def to_dict(self):
+        """Returns the model's attributes as a dictionary."""
+        names = (column.name for column in self.__table__.columns)
+        return {name: getattr(self, name) for name in names}
+
+
+_engine_facade = None
+
+
+def _get_engine_facade():
+    global _engine_facade
+
+    if not _engine_facade:
+        _engine_facade = db_session.EngineFacade.from_config(CONF)
+
+    return _engine_facade
+
+
+def cleanup():
+    global _engine_facade
+
+    _engine_facade = None
+
+
+def get_engine():
+    return _get_engine_facade().get_engine()
+
+
+def get_session(expire_on_commit=False):
+    return _get_engine_facade().get_session(expire_on_commit=expire_on_commit)
+
+
+@contextlib.contextmanager
+def transaction(expire_on_commit=False):
+    """Return a SQLAlchemy session in a scoped transaction."""
+    session = get_session(expire_on_commit=expire_on_commit)
+    with session.begin():
+        yield session
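+
+# Example (illustrative): the transaction commits on normal exit and
+# rolls back if the block raises:
+#
+#     with transaction() as session:
+#         session.add(ref)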
+
+
+def truncated(f):
+    """Ensure list truncation is detected in Driver list entity methods.
+
+    This is designed to wrap any sql Driver list_{entity} method in order to
+    calculate whether the resultant list has been truncated. Provided a limit
+    dict is found in the hints list, we increment the limit by one so as to
+    ask the wrapped function for one more entity than the limit; then, once
+    the list has been generated, we check to see if the original limit has
+    been exceeded, in which case we truncate back to that limit and set the
+    'truncated' boolean to 'true' in the hints limit dict.
+
+    """
+    @functools.wraps(f)
+    def wrapper(self, hints, *args, **kwargs):
+        if not hasattr(hints, 'limit'):
+            raise exception.UnexpectedError(
+                _('Cannot truncate a driver call without a hints list as '
+                  'the first parameter after self'))
+
+        if hints.limit is None:
+            return f(self, hints, *args, **kwargs)
+
+        # A limit is set, so ask for one more entry than we need
+        list_limit = hints.limit['limit']
+        hints.set_limit(list_limit + 1)
+        ref_list = f(self, hints, *args, **kwargs)
+
+        # If we got more than the original limit then trim back the list and
+        # mark it truncated.  In both cases, make sure we set the limit back
+        # to its original value.
+        if len(ref_list) > list_limit:
+            hints.set_limit(list_limit, truncated=True)
+            return ref_list[:list_limit]
+        else:
+            hints.set_limit(list_limit)
+            return ref_list
+    return wrapper
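+
+# Typical use on a driver method (hypothetical entity name):
+#
+#     @truncated
+#     def list_users(self, hints):
+#         ...
+#
+# With hints.limit = {'limit': 10}, the wrapped call fetches up to 11
+# rows; if 11 come back, the list is trimmed to 10 and the hints limit
+# dict is marked truncated=True.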
+
+
+def _filter(model, query, hints):
+    """Applies filtering to a query.
+
+    :param model: the table model in question
+    :param query: query to apply filters to
+    :param hints: contains the list of filters yet to be satisfied.
+                  Any filters satisfied here will be removed so that
+                  the caller will know if any filters remain.
+
+    :returns: query updated with any filters satisfied
+
+    """
+    def inexact_filter(model, query, filter_, satisfied_filters, hints):
+        """Applies an inexact filter to a query.
+
+        :param model: the table model in question
+        :param query: query to apply filters to
+        :param filter_: the dict that describes this filter
+        :param satisfied_filters: a cumulative list of satisfied filters, to
+                                  which filter_ will be added if it is
+                                  satisfied.
+        :param hints: contains the list of filters yet to be satisfied.
+
+        :returns: query updated to add any inexact filters we could
+                  satisfy
+
+        """
+        column_attr = getattr(model, filter_['name'])
+
+        # TODO(henry-nash): Sqlalchemy 0.7 defaults to case insensitivity
+        # so once we find a way of changing that (maybe on a call-by-call
+        # basis), we can add support for the case sensitive versions of
+        # the filters below.  For now, these case sensitive versions will
+        # be handled at the controller level.
+
+        if filter_['case_sensitive']:
+            return query
+
+        if filter_['comparator'] == 'contains':
+            query_term = column_attr.ilike('%%%s%%' % filter_['value'])
+        elif filter_['comparator'] == 'startswith':
+            query_term = column_attr.ilike('%s%%' % filter_['value'])
+        elif filter_['comparator'] == 'endswith':
+            query_term = column_attr.ilike('%%%s' % filter_['value'])
+        else:
+            # It's a filter we don't understand, so let the caller
+            # work out if they need to do something with it.
+            return query
+
+        satisfied_filters.append(filter_)
+        return query.filter(query_term)
+
+    def exact_filter(
+            model, filter_, satisfied_filters, cumulative_filter_dict, hints):
+        """Applies an exact filter to a query.
+
+        :param model: the table model in question
+        :param filter_: the dict that describes this filter
+        :param satisfied_filters: a cumulative list of satisfied filters, to
+                                  which filter_ will be added if it is
+                                  satisfied.
+        :param cumulative_filter_dict: a dict that describes the set of
+                                       exact filters built up so far
+        :param hints: contains the list of filters yet to be satisfied.
+
+        :returns: updated cumulative dict
+
+        """
+        key = filter_['name']
+        if isinstance(getattr(model, key).property.columns[0].type,
+                      sql.types.Boolean):
+            cumulative_filter_dict[key] = (
+                utils.attr_as_boolean(filter_['value']))
+        else:
+            cumulative_filter_dict[key] = filter_['value']
+        satisfied_filters.append(filter_)
+        return cumulative_filter_dict
+
+    filter_dict = {}
+    satisfied_filters = []
+    for filter_ in hints.filters:
+        if filter_['name'] not in model.attributes:
+            continue
+        if filter_['comparator'] == 'equals':
+            filter_dict = exact_filter(
+                model, filter_, satisfied_filters, filter_dict, hints)
+        else:
+            query = inexact_filter(
+                model, query, filter_, satisfied_filters, hints)
+
+    # Apply any exact filters we built up
+    if filter_dict:
+        query = query.filter_by(**filter_dict)
+
+    # Remove the satisfied filters; the caller then knows which remain.
+    for filter_ in satisfied_filters:
+        hints.filters.remove(filter_)
+
+    return query
+
+
+def _limit(query, hints):
+    """Applies a limit to a query.
+
+    :param query: query to apply filters to
+    :param hints: contains the list of filters and limit details.
+
+    :returns: updated query
+
+    """
+    # NOTE(henry-nash): If we were to implement pagination, then we
+    # would expand this method to support pagination and limiting.
+
+    # If we satisfied all the filters, set an upper limit if supplied
+    if hints.limit:
+        query = query.limit(hints.limit['limit'])
+    return query
+
+
+def filter_limit_query(model, query, hints):
+    """Applies filtering and limit to a query.
+
+    :param model: table model
+    :param query: query to apply filters to
+    :param hints: contains the list of filters and limit details.  This may
+                  be None, indicating that there are no filters or limits
+                  to be applied. If it's not None, then any filters
+                  satisfied here will be removed so that the caller will
+                  know if any filters remain.
+
+    :returns: updated query
+
+    """
+    if hints is None:
+        return query
+
+    # First try and satisfy any filters
+    query = _filter(model, query, hints)
+
+    # NOTE(henry-nash): Any unsatisfied filters will have been left in
+    # the hints list for the controller to handle. We can only try and
+    # limit here if all the filters are already satisfied since, if not,
+    # doing so might mess up the final results. If there are still
+    # unsatisfied filters, we have to leave any limiting to the controller
+    # as well.
+
+    if not hints.filters:
+        return _limit(query, hints)
+    else:
+        return query
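+
+# Example use in a driver list method (hypothetical model and session):
+#
+#     query = session.query(UserModel)
+#     query = filter_limit_query(UserModel, query, hints)
+#     return [ref.to_dict() for ref in query.all()]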
+
+
+def handle_conflicts(conflict_type='object'):
+    """Converts select sqlalchemy exceptions into HTTP 409 Conflict."""
+    _conflict_msg = 'Conflict %(conflict_type)s: %(details)s'
+
+    def decorator(method):
+        @functools.wraps(method)
+        def wrapper(*args, **kwargs):
+            try:
+                return method(*args, **kwargs)
+            except db_exception.DBDuplicateEntry as e:
+                # LOG the exception for debug purposes, do not send the
+                # exception details out with the raised Conflict exception
+                # as it can contain raw SQL.
+                LOG.debug(_conflict_msg, {'conflict_type': conflict_type,
+                                          'details': six.text_type(e)})
+                raise exception.Conflict(type=conflict_type,
+                                         details=_('Duplicate Entry'))
+            except db_exception.DBError as e:
+                # TODO(blk-u): inspecting inner_exception breaks encapsulation;
+                # oslo_db should provide the exception we need.
+                if isinstance(e.inner_exception, IntegrityError):
+                    # LOG the exception for debug purposes, do not send the
+                    # exception details out with the raised Conflict exception
+                    # as it can contain raw SQL.
+                    LOG.debug(_conflict_msg, {'conflict_type': conflict_type,
+                                              'details': six.text_type(e)})
+                    # NOTE(morganfainberg): This is really a case where the SQL
+                    # failed to store the data. This is not something that the
+                    # user has done wrong. Example would be a ForeignKey is
+                    # missing; the code that is executed before reaching the
+                    # SQL writing to the DB should catch the issue.
+                    raise exception.UnexpectedError(
+                        _('An unexpected error occurred when trying to '
+                          'store %s') % conflict_type)
+                raise
+
+        return wrapper
+    return decorator
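+
+# Typical use (hypothetical): wrap a create method so duplicate-entry
+# errors surface as HTTP 409 Conflict instead of a raw DB exception:
+#
+#     @handle_conflicts(conflict_type='user')
+#     def create_user(self, user_id, user):
+#         ...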
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/README b/keystone-moon/keystone/common/sql/migrate_repo/README
new file mode 100644 (file)
index 0000000..6218f8c
--- /dev/null
@@ -0,0 +1,4 @@
+This is a database migration repository.
+
+More information at
+http://code.google.com/p/sqlalchemy-migrate/
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/__init__.py b/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
new file mode 100644 (file)
index 0000000..f73dfc1
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+DB_INIT_VERSION = 43
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/manage.py b/keystone-moon/keystone/common/sql/migrate_repo/manage.py
new file mode 100644 (file)
index 0000000..39fa389
--- /dev/null
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+from migrate.versioning.shell import main
+
+if __name__ == '__main__':
+    main(debug='False')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg b/keystone-moon/keystone/common/sql/migrate_repo/migrate.cfg
new file mode 100644 (file)
index 0000000..db531bb
--- /dev/null
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=keystone
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py
new file mode 100644 (file)
index 0000000..6f326ec
--- /dev/null
@@ -0,0 +1,279 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import migrate
+from oslo_config import cfg
+from oslo_log import log
+import sqlalchemy as sql
+from sqlalchemy import orm
+
+from keystone.assignment.backends import sql as assignment_sql
+from keystone.common import sql as ks_sql
+from keystone.common.sql import migration_helpers
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    if migrate_engine.name == 'mysql':
+        # In Folsom we explicitly converted migrate_version to UTF8.
+        migrate_engine.execute(
+            'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
+        # Set default DB charset to UTF8.
+        migrate_engine.execute(
+            'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
+            migrate_engine.url.database)
+
+    credential = sql.Table(
+        'credential', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('user_id', sql.String(length=64), nullable=False),
+        sql.Column('project_id', sql.String(length=64)),
+        sql.Column('blob', ks_sql.JsonBlob, nullable=False),
+        sql.Column('type', sql.String(length=255), nullable=False),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    domain = sql.Table(
+        'domain', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('name', sql.String(length=64), nullable=False),
+        sql.Column('enabled', sql.Boolean, default=True, nullable=False),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    endpoint = sql.Table(
+        'endpoint', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('legacy_endpoint_id', sql.String(length=64)),
+        sql.Column('interface', sql.String(length=8), nullable=False),
+        sql.Column('region', sql.String(length=255)),
+        sql.Column('service_id', sql.String(length=64), nullable=False),
+        sql.Column('url', sql.Text, nullable=False),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        sql.Column('enabled', sql.Boolean, nullable=False, default=True,
+                   server_default='1'),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    group = sql.Table(
+        'group', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('domain_id', sql.String(length=64), nullable=False),
+        sql.Column('name', sql.String(length=64), nullable=False),
+        sql.Column('description', sql.Text),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    policy = sql.Table(
+        'policy', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('type', sql.String(length=255), nullable=False),
+        sql.Column('blob', ks_sql.JsonBlob, nullable=False),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    project = sql.Table(
+        'project', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('name', sql.String(length=64), nullable=False),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        sql.Column('description', sql.Text),
+        sql.Column('enabled', sql.Boolean),
+        sql.Column('domain_id', sql.String(length=64), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    role = sql.Table(
+        'role', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('name', sql.String(length=255), nullable=False),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    service = sql.Table(
+        'service', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('type', sql.String(length=255)),
+        sql.Column('enabled', sql.Boolean, nullable=False, default=True,
+                   server_default='1'),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    token = sql.Table(
+        'token', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('expires', sql.DateTime, default=None),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        sql.Column('valid', sql.Boolean, default=True, nullable=False),
+        sql.Column('trust_id', sql.String(length=64)),
+        sql.Column('user_id', sql.String(length=64)),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    trust = sql.Table(
+        'trust', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('trustor_user_id', sql.String(length=64), nullable=False),
+        sql.Column('trustee_user_id', sql.String(length=64), nullable=False),
+        sql.Column('project_id', sql.String(length=64)),
+        sql.Column('impersonation', sql.Boolean, nullable=False),
+        sql.Column('deleted_at', sql.DateTime),
+        sql.Column('expires_at', sql.DateTime),
+        sql.Column('remaining_uses', sql.Integer, nullable=True),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    trust_role = sql.Table(
+        'trust_role', meta,
+        sql.Column('trust_id', sql.String(length=64), primary_key=True,
+                   nullable=False),
+        sql.Column('role_id', sql.String(length=64), primary_key=True,
+                   nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    user = sql.Table(
+        'user', meta,
+        sql.Column('id', sql.String(length=64), primary_key=True),
+        sql.Column('name', sql.String(length=255), nullable=False),
+        sql.Column('extra', ks_sql.JsonBlob.impl),
+        sql.Column('password', sql.String(length=128)),
+        sql.Column('enabled', sql.Boolean),
+        sql.Column('domain_id', sql.String(length=64), nullable=False),
+        sql.Column('default_project_id', sql.String(length=64)),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    user_group_membership = sql.Table(
+        'user_group_membership', meta,
+        sql.Column('user_id', sql.String(length=64), primary_key=True),
+        sql.Column('group_id', sql.String(length=64), primary_key=True),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    region = sql.Table(
+        'region',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('description', sql.String(255), nullable=False),
+        sql.Column('parent_region_id', sql.String(64), nullable=True),
+        sql.Column('extra', sql.Text()),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    assignment = sql.Table(
+        'assignment',
+        meta,
+        sql.Column('type', sql.Enum(
+            assignment_sql.AssignmentType.USER_PROJECT,
+            assignment_sql.AssignmentType.GROUP_PROJECT,
+            assignment_sql.AssignmentType.USER_DOMAIN,
+            assignment_sql.AssignmentType.GROUP_DOMAIN,
+            name='type'),
+            nullable=False),
+        sql.Column('actor_id', sql.String(64), nullable=False),
+        sql.Column('target_id', sql.String(64), nullable=False),
+        sql.Column('role_id', sql.String(64), nullable=False),
+        sql.Column('inherited', sql.Boolean, default=False, nullable=False),
+        sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id'),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    # create all tables
+    tables = [credential, domain, endpoint, group,
+              policy, project, role, service,
+              token, trust, trust_role, user,
+              user_group_membership, region, assignment]
+
+    for table in tables:
+        try:
+            table.create()
+        except Exception:
+            LOG.exception('Exception while creating table: %r', table)
+            raise
+
+    # Unique Constraints
+    migrate.UniqueConstraint(user.c.domain_id,
+                             user.c.name,
+                             name='ixu_user_name_domain_id').create()
+    migrate.UniqueConstraint(group.c.domain_id,
+                             group.c.name,
+                             name='ixu_group_name_domain_id').create()
+    migrate.UniqueConstraint(role.c.name,
+                             name='ixu_role_name').create()
+    migrate.UniqueConstraint(project.c.domain_id,
+                             project.c.name,
+                             name='ixu_project_name_domain_id').create()
+    migrate.UniqueConstraint(domain.c.name,
+                             name='ixu_domain_name').create()
+
+    # Indexes
+    sql.Index('ix_token_expires', token.c.expires).create()
+    sql.Index('ix_token_expires_valid', token.c.expires,
+              token.c.valid).create()
+
+    fkeys = [
+        {'columns': [endpoint.c.service_id],
+         'references': [service.c.id]},
+
+        {'columns': [user_group_membership.c.group_id],
+         'references': [group.c.id],
+         'name': 'fk_user_group_membership_group_id'},
+
+        {'columns': [user_group_membership.c.user_id],
+         'references': [user.c.id],
+         'name': 'fk_user_group_membership_user_id'},
+
+        {'columns': [user.c.domain_id],
+         'references': [domain.c.id],
+         'name': 'fk_user_domain_id'},
+
+        {'columns': [group.c.domain_id],
+         'references': [domain.c.id],
+         'name': 'fk_group_domain_id'},
+
+        {'columns': [project.c.domain_id],
+         'references': [domain.c.id],
+         'name': 'fk_project_domain_id'},
+
+        {'columns': [assignment.c.role_id],
+         'references': [role.c.id]}
+    ]
+
+    for fkey in fkeys:
+        migrate.ForeignKeyConstraint(columns=fkey['columns'],
+                                     refcolumns=fkey['references'],
+                                     name=fkey.get('name')).create()
+
+    # Create the default domain.
+    session = orm.sessionmaker(bind=migrate_engine)()
+    domain.insert(migration_helpers.get_default_domain()).execute()
+    session.commit()
+
+
+def downgrade(migrate_engine):
+    raise NotImplementedError('Downgrade to pre-Icehouse release db schema is '
+                              'unsupported.')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
new file mode 100644 (file)
index 0000000..b6f4071
--- /dev/null
@@ -0,0 +1,25 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migration_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
new file mode 100644 (file)
index 0000000..b6f4071
--- /dev/null
@@ -0,0 +1,25 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migration_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
new file mode 100644 (file)
index 0000000..b6f4071
--- /dev/null
@@ -0,0 +1,25 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migration_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py
new file mode 100644 (file)
index 0000000..b6f4071
--- /dev/null
@@ -0,0 +1,25 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migration_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
new file mode 100644 (file)
index 0000000..b6f4071
--- /dev/null
@@ -0,0 +1,25 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Icehouse backports. Do not use this number for new
+# Juno work. New Juno work starts after all the placeholders.
+#
+# See blueprint reserved-db-migrations-icehouse and the related discussion:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migration_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
new file mode 100644 (file)
index 0000000..535a094
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sa
+
+
+def upgrade(migrate_engine):
+
+    if migrate_engine.name == 'mysql':
+        meta = sa.MetaData(bind=migrate_engine)
+        endpoint = sa.Table('endpoint', meta, autoload=True)
+
+        # NOTE(i159): MySQL requires indexes on referencing columns, and
+        # creates those indexes automatically. Those indexes will have
+        # different names depending on the version of MySQL used. We
+        # should make this naming consistent by reverting the index name
+        # to a consistent form.
+        if any(i for i in endpoint.indexes if
+               i.columns.keys() == ['service_id'] and i.name != 'service_id'):
+            # NOTE(i159): this action re-creates the index under the new
+            # name, which can be considered a rename under the MySQL
+            # rules.
+            sa.Index('service_id', endpoint.c.service_id).create()
+
+        user_group_membership = sa.Table('user_group_membership',
+                                         meta, autoload=True)
+
+        if any(i for i in user_group_membership.indexes if
+               i.columns.keys() == ['group_id'] and i.name != 'group_id'):
+            sa.Index('group_id', user_group_membership.c.group_id).create()
+
+
+def downgrade(migrate_engine):
+    # NOTE(i159): the index exists only in MySQL schemas, and only got an
+    # inconsistent name when MySQL 5.5 renamed it after re-creation
+    # (during migrations). We have simply fixed the inconsistency, so
+    # there is no need to revert it.
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
new file mode 100644 (file)
index 0000000..074fbb6
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.identity.mapping_backends import mapping
+
+
+MAPPING_TABLE = 'id_mapping'
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    mapping_table = sql.Table(
+        MAPPING_TABLE,
+        meta,
+        sql.Column('public_id', sql.String(64), primary_key=True),
+        sql.Column('domain_id', sql.String(64), nullable=False),
+        sql.Column('local_id', sql.String(64), nullable=False),
+        sql.Column('entity_type', sql.Enum(
+            mapping.EntityType.USER,
+            mapping.EntityType.GROUP,
+            name='entity_type'),
+            nullable=False),
+        sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    mapping_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    assignment = sql.Table(MAPPING_TABLE, meta, autoload=True)
+    assignment.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
new file mode 100644 (file)
index 0000000..9f1fd9f
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+_REGION_TABLE_NAME = 'region'
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
+    url_column = sql.Column('url', sql.String(255), nullable=True)
+    region_table.create_column(url_column)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
+    region_table.drop_column('url')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
new file mode 100644 (file)
index 0000000..6dc0004
--- /dev/null
@@ -0,0 +1,153 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""Migrated the endpoint 'region' column to 'region_id.
+
+In addition to the rename, the new column is made a foreign key to the
+respective 'region' in the region table; any missing regions are
+auto-created first so that the constraint can be satisfied.  Further, since
+the old region column was 255 chars and the region table's id column is 64
+chars, the size of the id column in the region table is increased to match.
+
+To Upgrade:
+
+
+Region Table
+
+Increase the size of the id column in the region table
+
+Endpoint Table
+
+a. Add the endpoint region_id column, a foreign key to the region table
+b. For each endpoint
+    i. Ensure there is a matching region in the region table; create it if missing
+    ii. Assign the region's id to the region_id column
+c. Remove the region column
+
+
+To Downgrade:
+
+Endpoint Table
+
+a. Add the region column back
+b. For each endpoint
+    i. Copy the region_id column to the region column
+c. Remove the region_id column
+
+Region Table
+
+Decrease the size of the id column in the region table, making sure that
+we don't get clashing primary keys.
+
+"""
+
+import migrate
+import six
+import sqlalchemy as sql
+
+
+def _migrate_to_region_id(migrate_engine, region_table, endpoint_table):
+    endpoints = list(endpoint_table.select().execute())
+
+    for endpoint in endpoints:
+        if endpoint.region is None:
+            continue
+
+        region = list(region_table.select(
+            whereclause=region_table.c.id == endpoint.region).execute())
+        if len(region) == 1:
+            region_id = region[0].id
+        else:
+            region_id = endpoint.region
+            region = {'id': region_id,
+                      'description': '',
+                      'extra': '{}'}
+            region_table.insert(region).execute()
+
+        new_values = {'region_id': region_id}
+        f = endpoint_table.c.id == endpoint.id
+        update = endpoint_table.update().where(f).values(new_values)
+        migrate_engine.execute(update)
+
+    migrate.ForeignKeyConstraint(
+        columns=[endpoint_table.c.region_id],
+        refcolumns=[region_table.c.id],
+        name='fk_endpoint_region_id').create()
+
+
+def _migrate_to_region(migrate_engine, region_table, endpoint_table):
+    endpoints = list(endpoint_table.select().execute())
+
+    for endpoint in endpoints:
+        new_values = {'region': endpoint.region_id}
+        f = endpoint_table.c.id == endpoint.id
+        update = endpoint_table.update().where(f).values(new_values)
+        migrate_engine.execute(update)
+
+    if migrate_engine.name != 'sqlite':
+        migrate.ForeignKeyConstraint(
+            columns=[endpoint_table.c.region_id],
+            refcolumns=[region_table.c.id],
+            name='fk_endpoint_region_id').drop()
+    endpoint_table.c.region_id.drop()
+
+
+def _prepare_regions_for_id_truncation(migrate_engine, region_table):
+    """Ensure there are no IDs that are bigger than 64 chars.
+
+    The id and parent_region_id fields were increased from 64 to 255 chars
+    during the upgrade.  On downgrade we have to make sure that the ids can
+    fit in the restored 64-char column size. For rows with ids longer than
+    this, we have no choice but to dump them.
+
+    """
+    for region in list(region_table.select().execute()):
+        if (len(six.text_type(region.id)) > 64 or
+                len(six.text_type(region.parent_region_id)) > 64):
+            delete = region_table.delete(region_table.c.id == region.id)
+            migrate_engine.execute(delete)
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    region_table = sql.Table('region', meta, autoload=True)
+    region_table.c.id.alter(type=sql.String(length=255))
+    region_table.c.parent_region_id.alter(type=sql.String(length=255))
+    endpoint_table = sql.Table('endpoint', meta, autoload=True)
+    region_id_column = sql.Column('region_id',
+                                  sql.String(length=255), nullable=True)
+    region_id_column.create(endpoint_table)
+
+    _migrate_to_region_id(migrate_engine, region_table, endpoint_table)
+
+    endpoint_table.c.region.drop()
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    region_table = sql.Table('region', meta, autoload=True)
+    endpoint_table = sql.Table('endpoint', meta, autoload=True)
+    region_column = sql.Column('region', sql.String(length=255))
+    region_column.create(endpoint_table)
+
+    _migrate_to_region(migrate_engine, region_table, endpoint_table)
+    _prepare_regions_for_id_truncation(migrate_engine, region_table)
+
+    region_table.c.id.alter(type=sql.String(length=64))
+    region_table.c.parent_region_id.alter(type=sql.String(length=64))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
new file mode 100644 (file)
index 0000000..33b13b7
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+ASSIGNMENT_TABLE = 'assignment'
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
+    idx = sql.Index('ix_actor_id', assignment.c.actor_id)
+    idx.create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
+    idx = sql.Index('ix_actor_id', assignment.c.actor_id)
+    idx.drop(migrate_engine)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
new file mode 100644 (file)
index 0000000..1cfddd3
--- /dev/null
@@ -0,0 +1,35 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Add indexes to `user_id` and `trust_id` columns for the `token` table."""
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    token = sql.Table('token', meta, autoload=True)
+
+    sql.Index('ix_token_user_id', token.c.user_id).create()
+    sql.Index('ix_token_trust_id', token.c.trust_id).create()
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    token = sql.Table('token', meta, autoload=True)
+
+    sql.Index('ix_token_user_id', token.c.user_id).drop()
+    sql.Index('ix_token_trust_id', token.c.trust_id).drop()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py
new file mode 100644 (file)
index 0000000..5f82254
--- /dev/null
@@ -0,0 +1,22 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migrate_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py
new file mode 100644 (file)
index 0000000..5f82254
--- /dev/null
@@ -0,0 +1,22 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migrate_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py
new file mode 100644 (file)
index 0000000..5f82254
--- /dev/null
@@ -0,0 +1,22 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migrate_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py
new file mode 100644 (file)
index 0000000..5f82254
--- /dev/null
@@ -0,0 +1,22 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migrate_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
new file mode 100644 (file)
index 0000000..5f82254
--- /dev/null
@@ -0,0 +1,22 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This is a placeholder for Juno backports. Do not use this number for new
+# Kilo work. New Kilo work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+    pass
+
+
+def downgrade(migration_engine):
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
new file mode 100644 (file)
index 0000000..bb8ef9f
--- /dev/null
@@ -0,0 +1,54 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+_PROJECT_TABLE_NAME = 'project'
+_PARENT_ID_COLUMN_NAME = 'parent_id'
+
+
+def list_constraints(project_table):
+    constraints = [{'table': project_table,
+                    'fk_column': _PARENT_ID_COLUMN_NAME,
+                    'ref_column': project_table.c.id}]
+
+    return constraints
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+    parent_id = sql.Column(_PARENT_ID_COLUMN_NAME, sql.String(64),
+                           nullable=True)
+    project_table.create_column(parent_id)
+
+    if migrate_engine.name == 'sqlite':
+        return
+    migration_helpers.add_constraints(list_constraints(project_table))
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+
+    # SQLite does not support constraints, and querying the constraints
+    # raises an exception
+    if migrate_engine.name != 'sqlite':
+        migration_helpers.remove_constraints(list_constraints(project_table))
+
+    project_table.drop_column(_PARENT_ID_COLUMN_NAME)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
new file mode 100644 (file)
index 0000000..5a33486
--- /dev/null
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+
+from keystone.common.sql import migration_helpers
+
+
+def list_constraints(migrate_engine):
+    meta = sqlalchemy.MetaData()
+    meta.bind = migrate_engine
+    assignment_table = sqlalchemy.Table('assignment', meta, autoload=True)
+    role_table = sqlalchemy.Table('role', meta, autoload=True)
+
+    constraints = [{'table': assignment_table,
+                    'fk_column': 'role_id',
+                    'ref_column': role_table.c.id}]
+    return constraints
+
+
+def upgrade(migrate_engine):
+    # SQLite does not support constraints, and querying the constraints
+    # raises an exception
+    if migrate_engine.name == 'sqlite':
+        return
+    migration_helpers.remove_constraints(list_constraints(migrate_engine))
+
+
+def downgrade(migrate_engine):
+    if migrate_engine.name == 'sqlite':
+        return
+    migration_helpers.add_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
new file mode 100644 (file)
index 0000000..109a841
--- /dev/null
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+_REGION_TABLE_NAME = 'region'
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
+    region_table.drop_column('url')
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
+    url_column = sql.Column('url', sql.String(255), nullable=True)
+    region_table.create_column(url_column)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
new file mode 100644 (file)
index 0000000..bca0090
--- /dev/null
@@ -0,0 +1,45 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+
+from keystone.common.sql import migration_helpers
+
+
+def list_constraints(migrate_engine):
+    meta = sqlalchemy.MetaData()
+    meta.bind = migrate_engine
+    user_table = sqlalchemy.Table('user', meta, autoload=True)
+    group_table = sqlalchemy.Table('group', meta, autoload=True)
+    domain_table = sqlalchemy.Table('domain', meta, autoload=True)
+
+    constraints = [{'table': user_table,
+                    'fk_column': 'domain_id',
+                    'ref_column': domain_table.c.id},
+                   {'table': group_table,
+                    'fk_column': 'domain_id',
+                    'ref_column': domain_table.c.id}]
+    return constraints
+
+
+def upgrade(migrate_engine):
+    # SQLite does not support constraints, and querying the constraints
+    # raises an exception
+    if migrate_engine.name == 'sqlite':
+        return
+    migration_helpers.remove_constraints(list_constraints(migrate_engine))
+
+
+def downgrade(migrate_engine):
+    if migrate_engine.name == 'sqlite':
+        return
+    migration_helpers.add_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
new file mode 100644 (file)
index 0000000..fd8717d
--- /dev/null
@@ -0,0 +1,55 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common import sql as ks_sql
+
+WHITELIST_TABLE = 'whitelisted_config'
+SENSITIVE_TABLE = 'sensitive_config'
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    whitelist_table = sql.Table(
+        WHITELIST_TABLE,
+        meta,
+        sql.Column('domain_id', sql.String(64), primary_key=True),
+        sql.Column('group', sql.String(255), primary_key=True),
+        sql.Column('option', sql.String(255), primary_key=True),
+        sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    whitelist_table.create(migrate_engine, checkfirst=True)
+
+    sensitive_table = sql.Table(
+        SENSITIVE_TABLE,
+        meta,
+        sql.Column('domain_id', sql.String(64), primary_key=True),
+        sql.Column('group', sql.String(255), primary_key=True),
+        sql.Column('option', sql.String(255), primary_key=True),
+        sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    sensitive_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    table = sql.Table(WHITELIST_TABLE, meta, autoload=True)
+    table.drop(migrate_engine, checkfirst=True)
+    table = sql.Table(SENSITIVE_TABLE, meta, autoload=True)
+    table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
new file mode 100644 (file)
index 0000000..3feadc5
--- /dev/null
@@ -0,0 +1,43 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    service_table = sql.Table('service', meta, autoload=True)
+    services = list(service_table.select().execute())
+
+    for service in services:
+        extra_dict = jsonutils.loads(service.extra)
+        # Skip records that already have a service name set
+        if extra_dict.get('name') is not None:
+            continue
+        # Default the name to empty string
+        extra_dict['name'] = ''
+        new_values = {
+            'extra': jsonutils.dumps(extra_dict),
+        }
+        f = service_table.c.id == service.id
+        update = service_table.update().where(f).values(new_values)
+        migrate_engine.execute(update)
+
+
+def downgrade(migrate_engine):
+    # The upgrade fixes the data inconsistency for the service name
+    # by defaulting the value to an empty string. There is no need
+    # to revert it.
+    pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/common/sql/migration_helpers.py b/keystone-moon/keystone/common/sql/migration_helpers.py
new file mode 100644 (file)
index 0000000..8693299
--- /dev/null
@@ -0,0 +1,258 @@
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import sys
+
+import migrate
+from migrate import exceptions
+from oslo_config import cfg
+from oslo_db.sqlalchemy import migration
+from oslo_serialization import jsonutils
+from oslo_utils import importutils
+import six
+import sqlalchemy
+
+from keystone.common import sql
+from keystone.common.sql import migrate_repo
+from keystone import contrib
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+DEFAULT_EXTENSIONS = ['endpoint_filter',
+                      'endpoint_policy',
+                      'federation',
+                      'oauth1',
+                      'revoke',
+                      ]
+
+
+def get_default_domain():
+    # Return the reference used for the default domain structure during
+    # sql migrations.
+    return {
+        'id': CONF.identity.default_domain_id,
+        'name': 'Default',
+        'enabled': True,
+        'extra': jsonutils.dumps({'description': 'Owns users and tenants '
+                                                 '(i.e. projects) available '
+                                                 'on Identity API v2.'})}
+
+
+#  Different RDBMSs use different schemes for naming the Foreign Key
+#  Constraints.  SQLAlchemy does not yet attempt to determine the name
+#  for the constraint, and instead attempts to deduce it from the column.
+#  This fails on MySQL.
+def get_constraints_names(table, column_name):
+    fkeys = [fk.name for fk in table.constraints
+             if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and
+                 column_name in fk.columns)]
+    return fkeys
+
+
+#  remove_constraints and add_constraints both accept a list of dictionaries
+#  that contain:
+#  {'table': a sqlalchemy table. The constraint is added to or dropped from
+#           this table.
+#  'fk_column': the name of a column on the above table.  The constraint
+#               is added to or dropped from this column.
+#  'ref_column': a sqlalchemy column object.  This is the reference column
+#               for the constraint.}
+def remove_constraints(constraints):
+    for constraint_def in constraints:
+        constraint_names = get_constraints_names(constraint_def['table'],
+                                                 constraint_def['fk_column'])
+        for constraint_name in constraint_names:
+            migrate.ForeignKeyConstraint(
+                columns=[getattr(constraint_def['table'].c,
+                                 constraint_def['fk_column'])],
+                refcolumns=[constraint_def['ref_column']],
+                name=constraint_name).drop()
+
+
+def add_constraints(constraints):
+    for constraint_def in constraints:
+
+        if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM':
+            # Don't try to create constraint when using MyISAM because it's
+            # not supported.
+            continue
+
+        ref_col = constraint_def['ref_column']
+        ref_engine = ref_col.table.kwargs.get('mysql_engine')
+        if ref_engine == 'MyISAM':
+            # Don't try to create constraint when using MyISAM because it's
+            # not supported.
+            continue
+
+        migrate.ForeignKeyConstraint(
+            columns=[getattr(constraint_def['table'].c,
+                             constraint_def['fk_column'])],
+            refcolumns=[constraint_def['ref_column']]).create()
+
+
+def rename_tables_with_constraints(renames, constraints, engine):
+    """Renames tables with foreign key constraints.
+
+    Tables are renamed after first removing constraints. The constraints are
+    replaced after the rename is complete.
+
+    This works on databases that don't support renaming tables that have
+    constraints on them (DB2).
+
+    `renames` is a dict, mapping {'to_table_name': from_table, ...}
+    """
+
+    if engine.name != 'sqlite':
+        # Sqlite doesn't support constraints, so nothing to remove.
+        remove_constraints(constraints)
+
+    for to_table_name in renames:
+        from_table = renames[to_table_name]
+        from_table.rename(to_table_name)
+
+    if engine.name != 'sqlite':
+        add_constraints(constraints)
+
+
+def find_migrate_repo(package=None, repo_name='migrate_repo'):
+    package = package or sql
+    path = os.path.abspath(os.path.join(
+        os.path.dirname(package.__file__), repo_name))
+    if os.path.isdir(path):
+        return path
+    raise exception.MigrationNotProvided(package.__name__, path)
+
+
+def _sync_common_repo(version):
+    abs_path = find_migrate_repo()
+    init_version = migrate_repo.DB_INIT_VERSION
+    engine = sql.get_engine()
+    migration.db_sync(engine, abs_path, version=version,
+                      init_version=init_version)
+
+
+def _fix_federation_tables(engine):
+    """Fix the identity_provider, federation_protocol and mapping tables
+     to be InnoDB and Charset UTF8.
+
+    This function is to work around bug #1426334. This has occurred because
+    the original migration did not specify InnoDB and charset utf8. Due
+    to the sanity_check, a deployer can get wedged here and require manual
+    database changes to fix.
+    """
+    # NOTE(marco-fargetta): this is a workaround that "fixes" the tables
+    # only if we're under MySQL
+    if engine.name == 'mysql':
+        # * Disable any check for the foreign keys because they prevent the
+        # alter table to execute
+        engine.execute("SET foreign_key_checks = 0")
+        # * Make the tables using InnoDB engine
+        engine.execute("ALTER TABLE identity_provider Engine=InnoDB")
+        engine.execute("ALTER TABLE federation_protocol Engine=InnoDB")
+        engine.execute("ALTER TABLE mapping Engine=InnoDB")
+        # * Make the tables using utf8 encoding
+        engine.execute("ALTER TABLE identity_provider "
+                       "CONVERT TO CHARACTER SET utf8")
+        engine.execute("ALTER TABLE federation_protocol "
+                       "CONVERT TO CHARACTER SET utf8")
+        engine.execute("ALTER TABLE mapping CONVERT TO CHARACTER SET utf8")
+        # * Revert the foreign keys check back
+        engine.execute("SET foreign_key_checks = 1")
+
+
+def _sync_extension_repo(extension, version):
+    init_version = 0
+    engine = sql.get_engine()
+
+    try:
+        package_name = '.'.join((contrib.__name__, extension))
+        package = importutils.import_module(package_name)
+    except ImportError:
+        raise ImportError(_("%s extension does not exist.")
+                          % package_name)
+    try:
+        abs_path = find_migrate_repo(package)
+        # Register the repo with the version control API.
+        # If it already knows about the repo, it will throw
+        # an exception that we can safely ignore.
+        try:
+            migration.db_version_control(sql.get_engine(), abs_path)
+        except exceptions.DatabaseAlreadyControlledError:
+            pass
+    except exception.MigrationNotProvided as e:
+        print(e)
+        sys.exit(1)
+    try:
+        migration.db_sync(engine, abs_path, version=version,
+                          init_version=init_version)
+    except ValueError:
+        # NOTE(marco-fargetta): ValueError is raised from the sanity check (
+        # verifies that tables are utf8 under mysql). The federation_protocol,
+        # identity_provider and mapping tables were not initially built with
+        # InnoDB and utf8 as part of the table arguments when the migration
+        # was initially created. Bug #1426334 is a scenario where the deployer
+        # can get wedged, unable to upgrade or downgrade.
+        # This is a workaround to "fix" those tables if we're under MySQL
+        # and the version is below 6, because the tables were introduced
+        # earlier and patched once migration 5 was available.
+        if (engine.name == 'mysql' and
+                int(six.text_type(get_db_version(extension))) < 6):
+            _fix_federation_tables(engine)
+            # The migration is applied again after the fix
+            migration.db_sync(engine, abs_path, version=version,
+                              init_version=init_version)
+        else:
+            raise
+
+
+def sync_database_to_version(extension=None, version=None):
+    if not extension:
+        _sync_common_repo(version)
+        # If a specific version was requested, it applies to the common
+        # repository only, so only that is synchronized.
+        if version is None:
+            for default_extension in DEFAULT_EXTENSIONS:
+                _sync_extension_repo(default_extension, version)
+    else:
+        _sync_extension_repo(extension, version)
+
+
+def get_db_version(extension=None):
+    if not extension:
+        return migration.db_version(sql.get_engine(), find_migrate_repo(),
+                                    migrate_repo.DB_INIT_VERSION)
+
+    try:
+        package_name = '.'.join((contrib.__name__, extension))
+        package = importutils.import_module(package_name)
+    except ImportError:
+        raise ImportError(_("%s extension does not exist.")
+                          % package_name)
+
+    return migration.db_version(
+        sql.get_engine(), find_migrate_repo(package), 0)
+
+
+def print_db_version(extension=None):
+    try:
+        db_version = get_db_version(extension=extension)
+        print(db_version)
+    except exception.MigrationNotProvided as e:
+        print(e)
+        sys.exit(1)
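
A short usage sketch of the helpers above, assuming keystone's configuration is loaded and the database named in `[database] connection` is reachable:

```python
# Usage sketch; assumes CONF is initialized and the database is reachable.
from keystone.common.sql import migration_helpers

# Sync the common repo to the latest version, then each default extension.
migration_helpers.sync_database_to_version()
# Sync a single extension repo to its latest version.
migration_helpers.sync_database_to_version(extension='federation')
# Print the current version of the common repo.
migration_helpers.print_db_version()
```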
diff --git a/keystone-moon/keystone/common/utils.py b/keystone-moon/keystone/common/utils.py
new file mode 100644 (file)
index 0000000..a4b03ff
--- /dev/null
@@ -0,0 +1,471 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 - 2012 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import calendar
+import collections
+import grp
+import hashlib
+import os
+import pwd
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import strutils
+import passlib.hash
+import six
+from six import moves
+
+from keystone import exception
+from keystone.i18n import _, _LE, _LW
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+
+def flatten_dict(d, parent_key=''):
+    """Flatten a nested dictionary
+
+    Converts a dictionary with nested values to a single level flat
+    dictionary, with dotted notation for each key.
+
+    """
+    items = []
+    for k, v in d.items():
+        new_key = parent_key + '.' + k if parent_key else k
+        if isinstance(v, collections.MutableMapping):
+            items.extend(flatten_dict(v, new_key).items())
+        else:
+            items.append((new_key, v))
+    return dict(items)
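
A quick illustration of the dotted-key output (values are arbitrary):

```python
nested = {'identity': {'driver': 'sql', 'ldap': {'url': 'ldap://host'}}}
assert flatten_dict(nested) == {
    'identity.driver': 'sql',
    'identity.ldap.url': 'ldap://host',
}
```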
+
+
+def read_cached_file(filename, cache_info, reload_func=None):
+    """Read from a file if it has been modified.
+
+    :param cache_info: dictionary to hold opaque cache.
+    :param reload_func: optional function to be called with data when
+                        file is reloaded due to a modification.
+
+    :returns: data from file.
+
+    """
+    mtime = os.path.getmtime(filename)
+    if not cache_info or mtime != cache_info.get('mtime'):
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        if reload_func:
+            reload_func(cache_info['data'])
+    return cache_info['data']
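
A usage sketch: the caller owns the opaque cache dict, and the file is re-read only when its mtime changes (the path is illustrative):

```python
cache = {}  # owned by the caller, mutated in place
data = read_cached_file('/etc/keystone/policy.json', cache)  # reads the file
data = read_cached_file('/etc/keystone/policy.json', cache)  # mtime unchanged: cached
```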
+
+
+class SmarterEncoder(jsonutils.json.JSONEncoder):
+    """Help for JSON encoding dict-like objects."""
+    def default(self, obj):
+        if not isinstance(obj, dict) and hasattr(obj, 'iteritems'):
+            return dict(obj.iteritems())
+        return super(SmarterEncoder, self).default(obj)
+
+
+class PKIEncoder(SmarterEncoder):
+    """Special encoder to make token JSON a bit shorter."""
+    item_separator = ','
+    key_separator = ':'
+
+
+def verify_length_and_trunc_password(password):
+    """Verify and truncate the provided password to the max_password_length."""
+    max_length = CONF.identity.max_password_length
+    try:
+        if len(password) > max_length:
+            if CONF.strict_password_check:
+                raise exception.PasswordVerificationError(size=max_length)
+            else:
+                LOG.warning(
+                    _LW('Truncating user password to '
+                        '%d characters.'), max_length)
+                return password[:max_length]
+        else:
+            return password
+    except TypeError:
+        raise exception.ValidationError(attribute='string', target='password')
+
+
+def hash_access_key(access):
+    hash_ = hashlib.sha256()
+    hash_.update(access)
+    return hash_.hexdigest()
+
+
+def hash_user_password(user):
+    """Hash a user dict's password without modifying the passed-in dict."""
+    password = user.get('password')
+    if password is None:
+        return user
+
+    return dict(user, password=hash_password(password))
+
+
+def hash_password(password):
+    """Hash a password. Hard."""
+    password_utf8 = verify_length_and_trunc_password(password).encode('utf-8')
+    return passlib.hash.sha512_crypt.encrypt(
+        password_utf8, rounds=CONF.crypt_strength)
+
+
+def check_password(password, hashed):
+    """Check that a plaintext password matches hashed.
+
+    The hashing scheme stores the salt concatenated with the actual hash
+    value, so verification can extract the salt from the hashed string.
+
+    """
+    if password is None or hashed is None:
+        return False
+    password_utf8 = verify_length_and_trunc_password(password).encode('utf-8')
+    return passlib.hash.sha512_crypt.verify(password_utf8, hashed)
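
A round-trip sketch of the two helpers above, assuming keystone configuration (crypt_strength, identity.max_password_length) has been loaded:

```python
hashed = hash_password('correct horse battery staple')
assert check_password('correct horse battery staple', hashed)
assert not check_password('wrong guess', hashed)
assert not check_password(None, hashed)  # None never matches
```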
+
+
+def attr_as_boolean(val_attr):
+    """Returns the boolean value, decoded from a string.
+
+    We test explicitly for a value meaning False, which can be one of
+    several formats as specified in oslo strutils.FALSE_STRINGS.
+    All other string values (including an empty string) are treated as
+    meaning True.
+
+    """
+    return strutils.bool_from_string(val_attr, default=True)
+
+
+def get_blob_from_credential(credential):
+    try:
+        blob = jsonutils.loads(credential.blob)
+    except (ValueError, TypeError):
+        raise exception.ValidationError(
+            message=_('Invalid blob in credential'))
+    if not blob or not isinstance(blob, dict):
+        raise exception.ValidationError(attribute='blob',
+                                        target='credential')
+    return blob
+
+
+def convert_ec2_to_v3_credential(ec2credential):
+    blob = {'access': ec2credential.access,
+            'secret': ec2credential.secret}
+    return {'id': hash_access_key(ec2credential.access),
+            'user_id': ec2credential.user_id,
+            'project_id': ec2credential.tenant_id,
+            'blob': jsonutils.dumps(blob),
+            'type': 'ec2',
+            'extra': jsonutils.dumps({})}
+
+
+def convert_v3_to_ec2_credential(credential):
+    blob = get_blob_from_credential(credential)
+    return {'access': blob.get('access'),
+            'secret': blob.get('secret'),
+            'user_id': credential.user_id,
+            'tenant_id': credential.project_id,
+            }
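
A sketch of the ec2-to-v3 direction with a stand-in credential object; the namedtuple is illustrative, and any object with these attributes works:

```python
import collections

Ec2Cred = collections.namedtuple('Ec2Cred', 'access secret user_id tenant_id')
ec2 = Ec2Cred('AKIAEXAMPLE', 's3cr3t', 'uid1', 'pid1')

v3 = convert_ec2_to_v3_credential(ec2)
assert v3['id'] == hash_access_key('AKIAEXAMPLE')  # id is a hash of access
assert v3['type'] == 'ec2'
```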
+
+
+def unixtime(dt_obj):
+    """Format datetime object as unix timestamp
+
+    :param dt_obj: datetime.datetime object
+    :returns: float
+
+    """
+    return calendar.timegm(dt_obj.utctimetuple())
+
+
+def auth_str_equal(provided, known):
+    """Constant-time string comparison.
+
+    :param provided: the first string
+    :param known: the second string
+
+    :return: True if the strings are equal.
+
+    This function takes two strings and compares them.  It is intended to be
+    used when doing a comparison for authentication purposes to help guard
+    against timing attacks.  When using the function for this purpose, always
+    provide the user-provided password as the first argument.  The time this
+    function will take is always a factor of the length of this string.
+    """
+    result = 0
+    p_len = len(provided)
+    k_len = len(known)
+    for i in moves.range(p_len):
+        a = ord(provided[i]) if i < p_len else 0
+        b = ord(known[i]) if i < k_len else 0
+        result |= a ^ b
+    return (p_len == k_len) & (result == 0)
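
Usage is deliberately order-sensitive; a minimal sketch:

```python
# The untrusted value goes first so timing depends only on its length.
assert auth_str_equal('secret', 'secret')
assert not auth_str_equal('secret', 'secrets')  # length mismatch
assert not auth_str_equal('secret', 'Secret')   # content mismatch
```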
+
+
+def setup_remote_pydev_debug():
+    if CONF.pydev_debug_host and CONF.pydev_debug_port:
+        try:
+            try:
+                from pydev import pydevd
+            except ImportError:
+                import pydevd
+
+            pydevd.settrace(CONF.pydev_debug_host,
+                            port=CONF.pydev_debug_port,
+                            stdoutToServer=True,
+                            stderrToServer=True)
+            return True
+        except Exception:
+            LOG.exception(_LE(
+                'Error setting up the debug environment. Verify that the '
+                'option --debug-url has the format <host>:<port> and that a '
+                'debugger process is listening on that port.'))
+            raise
+
+
+def get_unix_user(user=None):
+    '''Get the uid and user name.
+
+    This is a convenience utility which accepts a variety of input
+    which might represent a unix user. If successful it returns the uid
+    and name. Valid input is:
+
+    string
+        A string is first considered to be a user name and a lookup is
+        attempted under that name. If no name is found then an attempt
+        is made to convert the string to an integer and perform a
+        lookup as a uid.
+
+    int
+        An integer is interpreted as a uid.
+
+    None
+        None is interpreted to mean use the current process's
+        effective user.
+
+    If the input is a valid type but no user is found a KeyError is
+    raised. If the input is not a valid type a TypeError is raised.
+
+    :param object user: string, int or None specifying the user to
+                        lookup.
+
+    :return: tuple of (uid, name)
+    '''
+
+    if isinstance(user, six.string_types):
+        try:
+            user_info = pwd.getpwnam(user)
+        except KeyError:
+            try:
+                i = int(user)
+            except ValueError:
+                raise KeyError("user name '%s' not found" % user)
+            try:
+                user_info = pwd.getpwuid(i)
+            except KeyError:
+                raise KeyError("user id %d not found" % i)
+    elif isinstance(user, int):
+        try:
+            user_info = pwd.getpwuid(user)
+        except KeyError:
+            raise KeyError("user id %d not found" % user)
+    elif user is None:
+        user_info = pwd.getpwuid(os.geteuid())
+    else:
+        raise TypeError('user must be string, int or None; not %s (%r)' %
+                        (user.__class__.__name__, user))
+
+    return user_info.pw_uid, user_info.pw_name
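
A sketch of the accepted input forms (results depend on the local passwd database):

```python
uid, name = get_unix_user('root')  # by name; falls back to int lookup
uid, name = get_unix_user(0)       # by uid
uid, name = get_unix_user()        # current effective user
```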
+
+
+def get_unix_group(group=None):
+    '''Get the gid and group name.
+
+    This is a convenience utility which accepts a variety of input
+    which might represent a unix group. If successful it returns the gid
+    and name. Valid input is:
+
+    string
+        A string is first considered to be a group name and a lookup is
+        attempted under that name. If no name is found then an attempt
+        is made to convert the string to an integer and perform a
+        lookup as a gid.
+
+    int
+        An integer is interpreted as a gid.
+
+    None
+        None is interpreted to mean use the current process's
+        effective group.
+
+    If the input is a valid type but no group is found a KeyError is
+    raised. If the input is not a valid type a TypeError is raised.
+
+
+    :param object group: string, int or None specifying the group to
+                         lookup.
+
+    :return: tuple of (gid, name)
+    '''
+
+    if isinstance(group, six.string_types):
+        try:
+            group_info = grp.getgrnam(group)
+        except KeyError:
+            # Was an int passed as a string?
+            # Try converting to int and lookup by id instead.
+            try:
+                i = int(group)
+            except ValueError:
+                raise KeyError("group name '%s' not found" % group)
+            try:
+                group_info = grp.getgrgid(i)
+            except KeyError:
+                raise KeyError("group id %d not found" % i)
+    elif isinstance(group, int):
+        try:
+            group_info = grp.getgrgid(group)
+        except KeyError:
+            raise KeyError("group id %d not found" % group)
+    elif group is None:
+        group_info = grp.getgrgid(os.getegid())
+    else:
+        raise TypeError('group must be string, int or None; not %s (%r)' %
+                        (group.__class__.__name__, group))
+
+    return group_info.gr_gid, group_info.gr_name
+
+
+def set_permissions(path, mode=None, user=None, group=None, log=None):
+    '''Set the ownership and permissions on the pathname.
+
+    Each of the mode, user and group are optional, if None then
+    that aspect is not modified.
+
+    Owner and group may be specified either with a symbolic name
+    or numeric id.
+
+    :param string path: Pathname whose ownership and permissions are set.
+    :param object mode: permission flags (int), as for chmod;
+                        if None do not set.
+    :param object user: set user, name (string) or uid (integer);
+                        if None do not set.
+    :param object group: set group, name (string) or gid (integer);
+                         if None do not set.
+    :param logger log: logging.logger object, used to emit log messages,
+                       if None no logging is performed.
+    '''
+
+    if user is None:
+        user_uid, user_name = None, None
+    else:
+        user_uid, user_name = get_unix_user(user)
+
+    if group is None:
+        group_gid, group_name = None, None
+    else:
+        group_gid, group_name = get_unix_group(group)
+
+    if log:
+        if mode is None:
+            mode_string = str(mode)
+        else:
+            mode_string = oct(mode)
+        log.debug("set_permissions: "
+                  "path='%s' mode=%s user=%s(%s) group=%s(%s)",
+                  path, mode_string,
+                  user_name, user_uid, group_name, group_gid)
+
+    # Change user and group if specified
+    if user_uid is not None or group_gid is not None:
+        if user_uid is None:
+            user_uid = -1
+        if group_gid is None:
+            group_gid = -1
+        try:
+            os.chown(path, user_uid, group_gid)
+        except OSError as exc:
+            raise EnvironmentError("chown('%s', %s, %s): %s" %
+                                   (path,
+                                    user_name, group_name,
+                                    exc.strerror))
+
+    # Change permission flags
+    if mode is not None:
+        try:
+            os.chmod(path, mode)
+        except OSError as exc:
+            raise EnvironmentError("chmod('%s', %#o): %s" %
+                                   (path, mode, exc.strerror))
+
+
+def make_dirs(path, mode=None, user=None, group=None, log=None):
+    '''Assure directory exists, set ownership and permissions.
+
+    Assure the directory exists and optionally set its ownership
+    and permissions.
+
+    Each of the mode, user and group are optional, if None then
+    that aspect is not modified.
+
+    Owner and group may be specified either with a symbolic name
+    or numeric id.
+
+    :param string path: Pathname of directory whose existence is assured.
+    :param object mode: permission flags (int), as for chmod;
+                        if None do not set.
+    :param object user: set user, name (string) or uid (integer);
+                        if None do not set.
+    :param object group: set group, name (string) or gid (integer);
+                         if None do not set.
+    :param logger log: logging.logger object, used to emit log messages,
+                       if None no logging is performed.
+    '''
+
+    if log:
+        if mode is None:
+            mode_string = str(mode)
+        else:
+            mode_string = oct(mode)
+        log.debug("make_dirs path='%s' mode=%s user=%s group=%s",
+                  path, mode_string, user, group)
+
+    if not os.path.exists(path):
+        try:
+            os.makedirs(path)
+        except OSError as exc:
+            raise EnvironmentError("makedirs('%s'): %s" % (path, exc.strerror))
+
+    set_permissions(path, mode, user, group, log)
+
+
+class WhiteListedItemFilter(object):
+
+    def __init__(self, whitelist, data):
+        self._whitelist = set(whitelist or [])
+        self._data = data
+
+    def __getitem__(self, name):
+        if name not in self._whitelist:
+            raise KeyError
+        return self._data[name]
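
The filter exposes only whitelisted keys; anything else raises KeyError even when present in the underlying data:

```python
f = WhiteListedItemFilter(['name'], {'name': 'demo', 'password': 's3cr3t'})
assert f['name'] == 'demo'
try:
    f['password']
except KeyError:
    pass  # hidden, even though the key exists in the data
```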
diff --git a/keystone-moon/keystone/common/validation/__init__.py b/keystone-moon/keystone/common/validation/__init__.py
new file mode 100644 (file)
index 0000000..f9c58ea
--- /dev/null
@@ -0,0 +1,62 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Request body validating middleware for OpenStack Identity resources."""
+
+import functools
+
+from keystone.common.validation import validators
+
+
+def validated(request_body_schema, resource_to_validate):
+    """Register a schema to validate a resource reference.
+
+    Registered schema will be used for validating a request body just before
+    API method execution.
+
+    :param request_body_schema: a schema to validate the resource reference
+    :param resource_to_validate: the reference to validate
+
+    """
+    schema_validator = validators.SchemaValidator(request_body_schema)
+
+    def add_validator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            if resource_to_validate in kwargs:
+                schema_validator.validate(kwargs[resource_to_validate])
+            return func(*args, **kwargs)
+        return wrapper
+    return add_validator
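
A sketch of decorating a controller method; the schema and names are illustrative, not defined by this module:

```python
from keystone.common import validation

entity_create = {  # illustrative jsonschema document
    'type': 'object',
    'properties': {'name': {'type': 'string', 'maxLength': 255}},
    'required': ['name'],
}


class EntityV3(object):
    @validation.validated(entity_create, 'entity')
    def create_entity(self, context, entity):
        # 'entity' is validated only when passed as a keyword argument,
        # per the 'resource_to_validate in kwargs' check in the wrapper.
        return entity
```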
+
+
+def nullable(property_schema):
+    """Clone a property schema into one that is nullable.
+
+    :param dict property_schema: schema to clone into a nullable schema
+    :returns: a new dict schema
+    """
+    # TODO(dstanek): deal with the case where type is already a list; we don't
+    #                do that yet so I'm not wasting time on it
+    new_schema = property_schema.copy()
+    new_schema['type'] = [property_schema['type'], 'null']
+    return new_schema
+
+
+def add_array_type(property_schema):
+    """Convert the parameter schema to be of type list.
+
+    :param dict property_schema: schema to add array type to
+    :returns: a new dict schema
+    """
+    new_schema = property_schema.copy()
+    new_schema['type'] = [property_schema['type'], 'array']
+    return new_schema
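
Both helpers copy the schema and widen its 'type'; for example:

```python
prop = {'type': 'string', 'maxLength': 64}
assert nullable(prop)['type'] == ['string', 'null']
assert add_array_type(prop)['type'] == ['string', 'array']
assert prop['type'] == 'string'  # the original schema is not mutated
```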
diff --git a/keystone-moon/keystone/common/validation/parameter_types.py b/keystone-moon/keystone/common/validation/parameter_types.py
new file mode 100644 (file)
index 0000000..c590883
--- /dev/null
@@ -0,0 +1,57 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Common parameter types for validating a request reference."""
+
+boolean = {
+    'type': 'boolean',
+    'enum': [True, False]
+}
+
+# NOTE(lbragstad): Be mindful of this pattern, as it might require changes
+# once this is used on user names (LDAP-based user names specifically),
+# since commas aren't allowed in the following pattern. Here we are only
+# going to check the length of the name and ensure that it's a string.
+# Right now we are not going to validate on a naming pattern, to avoid
+# issues with internationalization.
+name = {
+    'type': 'string',
+    'minLength': 1,
+    'maxLength': 255
+}
+
+id_string = {
+    'type': 'string',
+    'minLength': 1,
+    'maxLength': 64,
+    # TODO(lbragstad): Find a way to make this configurable such that the end
+    # user chooses how much control they want over id_strings with a regex
+    'pattern': '^[a-zA-Z0-9-]+$'
+}
+
+description = {
+    'type': 'string'
+}
+
+url = {
+    'type': 'string',
+    'minLength': 0,
+    'maxLength': 225,
+    # NOTE(edmondsw): we could do more to validate per various RFCs, but
+    # the decision was made to err on the side of leniency. The following
+    # is based on rfc1738 section 2.1.
+    'pattern': '[a-zA-Z0-9+.-]+:.+'
+}
+
+email = {
+    'type': 'string',
+    'format': 'email'
+}
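
These fragments are meant to be composed into full resource schemas; a hypothetical user-creation schema might look like this (the composition itself is not defined in this module):

```python
from keystone.common import validation
from keystone.common.validation import parameter_types

user_create = {
    'type': 'object',
    'properties': {
        'name': parameter_types.name,
        'domain_id': parameter_types.id_string,
        'description': validation.nullable(parameter_types.description),
        'email': parameter_types.email,
        'enabled': parameter_types.boolean,
    },
    'required': ['name'],
    'additionalProperties': True,
}
```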
diff --git a/keystone-moon/keystone/common/validation/validators.py b/keystone-moon/keystone/common/validation/validators.py
new file mode 100644 (file)
index 0000000..a457417
--- /dev/null
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Internal implementation of request body validating middleware."""
+
+import jsonschema
+
+from keystone import exception
+from keystone.i18n import _
+
+
+class SchemaValidator(object):
+    """Resource reference validator class."""
+
+    validator = None
+    validator_org = jsonschema.Draft4Validator
+
+    def __init__(self, schema):
+        # NOTE(lbragstad): If at some point in the future we want to extend
+        # our validators to include something specific we need to check for,
+        # we can do it here. Nova's V3 API validators extend the validator to
+        # include `self._validate_minimum` and `self._validate_maximum`. This
+        # would be handy if we needed to check for something the jsonschema
+        # didn't by default. See the Nova V3 validator for details on how this
+        # is done.
+        validators = {}
+        validator_cls = jsonschema.validators.extend(self.validator_org,
+                                                     validators)
+        fc = jsonschema.FormatChecker()
+        self.validator = validator_cls(schema, format_checker=fc)
+
+    def validate(self, *args, **kwargs):
+        try:
+            self.validator.validate(*args, **kwargs)
+        except jsonschema.ValidationError as ex:
+            # NOTE: For consistency of messages across OpenStack, this
+            # error message has been written in a format consistent with
+            # WSME.
+            if len(ex.path) > 0:
+                # NOTE(lbragstad): Here we could think about using iter_errors
+                # as a method of providing invalid parameters back to the
+                # user.
+                # TODO(lbragstad): If the value of a field is confidential or
+                # too long, then we should build the masking in here so that
+                # we don't expose sensitive user information in the event it
+                # fails validation.
+                detail = _("Invalid input for field '%(path)s'. The value is "
+                           "'%(value)s'.") % {'path': ex.path.pop(),
+                                              'value': ex.instance}
+            else:
+                detail = ex.message
+            raise exception.SchemaValidationError(detail=detail)
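+
+
+# Example (illustrative): validating a reference against a schema.
+#
+#     schema = {'type': 'object',
+#               'properties': {'name': {'type': 'string'}},
+#               'required': ['name']}
+#     validator = SchemaValidator(schema)
+#     validator.validate({'name': 'demo'})  # passes
+#     validator.validate({})                # raises SchemaValidationError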
diff --git a/keystone-moon/keystone/common/wsgi.py b/keystone-moon/keystone/common/wsgi.py
new file mode 100644 (file)
index 0000000..6ee8150
--- /dev/null
@@ -0,0 +1,830 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Utility methods for working with WSGI servers."""
+
+import copy
+import itertools
+import logging
+import urllib
+
+from oslo_config import cfg
+import oslo_i18n
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import importutils
+from oslo_utils import strutils
+import routes.middleware
+import six
+import webob.dec
+import webob.exc
+
+from keystone.common import dependency
+from keystone.common import json_home
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone.i18n import _LI
+from keystone.i18n import _LW
+from keystone.models import token_model
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+# Environment variable used to pass the request context
+CONTEXT_ENV = 'openstack.context'
+
+
+# Environment variable used to pass the request params
+PARAMS_ENV = 'openstack.params'
+
+
+def validate_token_bind(context, token_ref):
+    bind_mode = CONF.token.enforce_token_bind
+
+    if bind_mode == 'disabled':
+        return
+
+    if not isinstance(token_ref, token_model.KeystoneToken):
+        raise exception.UnexpectedError(_('token reference must be a '
+                                          'KeystoneToken type, got: %s') %
+                                        type(token_ref))
+    bind = token_ref.bind
+
+    # permissive and strict modes don't require there to be a bind
+    permissive = bind_mode in ('permissive', 'strict')
+
+    # get the named mode if bind_mode is not one of the known
+    name = None if permissive or bind_mode == 'required' else bind_mode
+
+    if not bind:
+        if permissive:
+            # no bind provided and none required
+            return
+        else:
+            LOG.info(_LI("No bind information present in token"))
+            raise exception.Unauthorized()
+
+    if name and name not in bind:
+        LOG.info(_LI("Named bind mode %s not in bind information"), name)
+        raise exception.Unauthorized()
+
+    for bind_type, identifier in six.iteritems(bind):
+        if bind_type == 'kerberos':
+            if (context['environment'].get('AUTH_TYPE', '').lower()
+                    != 'negotiate'):
+                LOG.info(_LI("Kerberos credentials required and not present"))
+                raise exception.Unauthorized()
+
+            if not context['environment'].get('REMOTE_USER') == identifier:
+                LOG.info(_LI("Kerberos credentials do not match "
+                             "those in bind"))
+                raise exception.Unauthorized()
+
+            LOG.info(_LI("Kerberos bind authentication successful"))
+
+        elif bind_mode == 'permissive':
+            LOG.debug(("Ignoring unknown bind for permissive mode: "
+                       "{%(bind_type)s: %(identifier)s}"),
+                      {'bind_type': bind_type, 'identifier': identifier})
+        else:
+            LOG.info(_LI("Couldn't verify unknown bind: "
+                         "{%(bind_type)s: %(identifier)s}"),
+                     {'bind_type': bind_type, 'identifier': identifier})
+            raise exception.Unauthorized()
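+
+# Summary (illustrative) of the enforce_token_bind modes handled above,
+# assuming a token carrying bind = {'kerberos': 'alice@EXAMPLE.COM'}:
+#
+#     'disabled'   - bind information is ignored entirely
+#     'permissive' - verify known bind types if present; ignore unknown ones
+#     'strict'     - like permissive, but unknown bind types are rejected
+#     'required'   - some bind must be present; unknown types are rejected
+#     'kerberos'   - a bind of that specific type must be present and valid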
+
+
+def best_match_language(req):
+    """Determines the best available locale from the Accept-Language
+    HTTP header passed in the request.
+    """
+
+    if not req.accept_language:
+        return None
+    return req.accept_language.best_match(
+        oslo_i18n.get_available_languages('keystone'))
+
+
+class BaseApplication(object):
+    """Base WSGI application wrapper. Subclasses need to implement __call__."""
+
+    @classmethod
+    def factory(cls, global_config, **local_config):
+        """Used for paste app factories in paste.deploy config files.
+
+        Any local configuration (that is, values under the [app:APPNAME]
+        section of the paste config) will be passed into the `__init__` method
+        as kwargs.
+
+        A hypothetical configuration would look like:
+
+            [app:wadl]
+            latest_version = 1.3
+            paste.app_factory = keystone.fancy_api:Wadl.factory
+
+        which would result in a call to the `Wadl` class as
+
+            import keystone.fancy_api
+            keystone.fancy_api.Wadl(latest_version='1.3')
+
+        You could of course re-implement the `factory` method in subclasses,
+        but thanks to the kwarg passing it shouldn't be necessary.
+
+        """
+        return cls(**local_config)
+
+    def __call__(self, environ, start_response):
+        r"""Subclasses will probably want to implement __call__ like this:
+
+        @webob.dec.wsgify()
+        def __call__(self, req):
+          # Any of the following objects work as responses:
+
+          # Option 1: simple string
+          res = 'message\n'
+
+          # Option 2: a nicely formatted HTTP exception page
+          res = exc.HTTPForbidden(explanation='Nice try')
+
+          # Option 3: a webob Response object (in case you need to play with
+          # headers, or you want to be treated like an iterable)
+          res = Response()
+          res.app_iter = open('somefile')
+
+          # Option 4: any wsgi app to be run next
+          res = self.application
+
+          # Option 5: you can get a Response object for a wsgi app, too, to
+          # play with headers etc
+          res = req.get_response(self.application)
+
+          # You can then just return your response...
+          return res
+          # ... or set req.response and return None.
+          req.response = res
+
+        See the end of http://pythonpaste.org/webob/modules/dec.html
+        for more info.
+
+        """
+        raise NotImplementedError('You must implement __call__')
+
+
+@dependency.requires('assignment_api', 'policy_api', 'token_provider_api')
+class Application(BaseApplication):
+    @webob.dec.wsgify()
+    def __call__(self, req):
+        arg_dict = req.environ['wsgiorg.routing_args'][1]
+        action = arg_dict.pop('action')
+        del arg_dict['controller']
+
+        # allow middleware up the stack to provide context, params and headers.
+        context = req.environ.get(CONTEXT_ENV, {})
+        context['query_string'] = dict(six.iteritems(req.params))
+        context['headers'] = dict(six.iteritems(req.headers))
+        context['path'] = req.environ['PATH_INFO']
+        scheme = (None if not CONF.secure_proxy_ssl_header
+                  else req.environ.get(CONF.secure_proxy_ssl_header))
+        if scheme:
+            # NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
+            # before the proxy removed it ('https' usually). So if
+            # the webob.Request instance is modified in order to use this
+            # scheme instead of the one defined by API, the call to
+            # webob.Request.relative_url() will return a URL with the correct
+            # scheme.
+            req.environ['wsgi.url_scheme'] = scheme
+        context['host_url'] = req.host_url
+        params = req.environ.get(PARAMS_ENV, {})
+        # authentication and authorization attributes are set as environment
+        # values by the container and processed by the pipeline.  The
+        # complete set is not yet known.
+        context['environment'] = req.environ
+        context['accept_header'] = req.accept
+        req.environ = None
+
+        params.update(arg_dict)
+
+        context.setdefault('is_admin', False)
+
+        # TODO(termie): do some basic normalization on methods
+        method = getattr(self, action)
+
+        # NOTE(morganfainberg): use the request method to normalize the
+        # response code between GET and HEAD requests. The HTTP status should
+        # be the same.
+        req_method = req.environ['REQUEST_METHOD'].upper()
+        LOG.info('%(req_method)s %(path)s?%(params)s', {
+            'req_method': req_method,
+            'path': context['path'],
+            'params': urllib.urlencode(req.params)})
+
+        params = self._normalize_dict(params)
+
+        try:
+            result = method(context, **params)
+        except exception.Unauthorized as e:
+            LOG.warning(
+                _LW("Authorization failed. %(exception)s from "
+                    "%(remote_addr)s"),
+                {'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
+            return render_exception(e, context=context,
+                                    user_locale=best_match_language(req))
+        except exception.Error as e:
+            LOG.warning(six.text_type(e))
+            return render_exception(e, context=context,
+                                    user_locale=best_match_language(req))
+        except TypeError as e:
+            LOG.exception(six.text_type(e))
+            return render_exception(exception.ValidationError(e),
+                                    context=context,
+                                    user_locale=best_match_language(req))
+        except Exception as e:
+            LOG.exception(six.text_type(e))
+            return render_exception(exception.UnexpectedError(exception=e),
+                                    context=context,
+                                    user_locale=best_match_language(req))
+
+        if result is None:
+            return render_response(status=(204, 'No Content'))
+        elif isinstance(result, six.string_types):
+            return result
+        elif isinstance(result, webob.Response):
+            return result
+        elif isinstance(result, webob.exc.WSGIHTTPException):
+            return result
+
+        response_code = self._get_response_code(req)
+        return render_response(body=result, status=response_code,
+                               method=req_method)
+
+    def _get_response_code(self, req):
+        req_method = req.environ['REQUEST_METHOD']
+        controller = importutils.import_class('keystone.common.controller')
+        code = None
+        if isinstance(self, controller.V3Controller) and req_method == 'POST':
+            code = (201, 'Created')
+        return code
+
+    def _normalize_arg(self, arg):
+        return arg.replace(':', '_').replace('-', '_')
+
+    def _normalize_dict(self, d):
+        return {self._normalize_arg(k): v for (k, v) in six.iteritems(d)}
+
+    def assert_admin(self, context):
+        if not context['is_admin']:
+            try:
+                user_token_ref = token_model.KeystoneToken(
+                    token_id=context['token_id'],
+                    token_data=self.token_provider_api.validate_token(
+                        context['token_id']))
+            except exception.TokenNotFound as e:
+                raise exception.Unauthorized(e)
+
+            validate_token_bind(context, user_token_ref)
+            creds = copy.deepcopy(user_token_ref.metadata)
+
+            try:
+                creds['user_id'] = user_token_ref.user_id
+            except exception.UnexpectedError:
+                LOG.debug('Invalid user')
+                raise exception.Unauthorized()
+
+            if user_token_ref.project_scoped:
+                creds['tenant_id'] = user_token_ref.project_id
+            else:
+                LOG.debug('Invalid tenant')
+                raise exception.Unauthorized()
+
+            creds['roles'] = user_token_ref.role_names
+            # Accept either is_admin or the admin role
+            self.policy_api.enforce(creds, 'admin_required', {})
+
+    def _attribute_is_empty(self, ref, attribute):
+        """Returns true if the attribute in the given ref (which is a
+        dict) is empty or None.
+        """
+        return ref.get(attribute) is None or ref.get(attribute) == ''
+
+    def _require_attribute(self, ref, attribute):
+        """Ensures the reference contains the specified attribute.
+
+        Raise a ValidationError if the given attribute is not present
+        """
+        if self._attribute_is_empty(ref, attribute):
+            msg = _('%s field is required and cannot be empty') % attribute
+            raise exception.ValidationError(message=msg)
+
+    def _require_attributes(self, ref, attrs):
+        """Ensures the reference contains the specified attributes.
+
+        Raise a ValidationError if any of the given attributes is not present
+        """
+        missing_attrs = [attribute for attribute in attrs
+                         if self._attribute_is_empty(ref, attribute)]
+
+        if missing_attrs:
+            msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs)
+            raise exception.ValidationError(message=msg)
+
+    def _get_trust_id_for_request(self, context):
+        """Get the trust_id for a call.
+
+        Retrieve the trust_id from the token
+        Returns None if token is not trust scoped
+        """
+        if ('token_id' not in context or
+                context.get('token_id') == CONF.admin_token):
+            LOG.debug(('will not look up trust as the request auth token is '
+                       'either absent or it is the system admin token'))
+            return None
+
+        try:
+            token_data = self.token_provider_api.validate_token(
+                context['token_id'])
+        except exception.TokenNotFound:
+            LOG.warning(_LW('Invalid token in _get_trust_id_for_request'))
+            raise exception.Unauthorized()
+
+        token_ref = token_model.KeystoneToken(token_id=context['token_id'],
+                                              token_data=token_data)
+        return token_ref.trust_id
+
+    @classmethod
+    def base_url(cls, context, endpoint_type):
+        url = CONF['%s_endpoint' % endpoint_type]
+
+        if url:
+            substitutions = dict(
+                itertools.chain(six.iteritems(CONF),
+                                six.iteritems(CONF.eventlet_server)))
+
+            url = url % substitutions
+        else:
+            # NOTE(jamielennox): if url is not set via the config file we
+            # should set it relative to the url that the user used to get here
+            # so as not to mess with version discovery. This is not perfect.
+            # host_url omits the path prefix, but there isn't another good
+            # solution that will work for all urls.
+            url = context['host_url']
+
+        return url.rstrip('/')
+
+
+class Middleware(Application):
+    """Base WSGI middleware.
+
+    These classes require an application to be initialized, which will be
+    called next in the stack.  By default the middleware simply calls its
+    wrapped app, or you can override __call__ to customize its behavior.
+
+    """
+
+    @classmethod
+    def factory(cls, global_config, **local_config):
+        """Used for paste app factories in paste.deploy config files.
+
+        Any local configuration (that is, values under the [filter:APPNAME]
+        section of the paste config) will be passed into the `__init__` method
+        as kwargs.
+
+        A hypothetical configuration would look like:
+
+            [filter:analytics]
+            redis_host = 127.0.0.1
+            paste.filter_factory = keystone.analytics:Analytics.factory
+
+        which would result in a call to the `Analytics` class as
+
+            import keystone.analytics
+            keystone.analytics.Analytics(app, redis_host='127.0.0.1')
+
+        You could of course re-implement the `factory` method in subclasses,
+        but thanks to the kwarg passing it shouldn't be necessary.
+
+        """
+        def _factory(app):
+            # NOTE: only the local (per-filter) configuration is passed on.
+            return cls(app, **local_config)
+        return _factory
+
+    def __init__(self, application):
+        super(Middleware, self).__init__()
+        self.application = application
+
+    def process_request(self, request):
+        """Called on each request.
+
+        If this returns None, the next application down the stack will be
+        executed. If it returns a response then that response will be returned
+        and execution will stop here.
+
+        """
+        return None
+
+    def process_response(self, request, response):
+        """Do whatever you'd like to the response, based on the request."""
+        return response
+
+    @webob.dec.wsgify()
+    def __call__(self, request):
+        try:
+            response = self.process_request(request)
+            if response:
+                return response
+            response = request.get_response(self.application)
+            return self.process_response(request, response)
+        except exception.Error as e:
+            LOG.warning(six.text_type(e))
+            return render_exception(e, request=request,
+                                    user_locale=best_match_language(request))
+        except TypeError as e:
+            LOG.exception(six.text_type(e))
+            return render_exception(exception.ValidationError(e),
+                                    request=request,
+                                    user_locale=best_match_language(request))
+        except Exception as e:
+            LOG.exception(six.text_type(e))
+            return render_exception(exception.UnexpectedError(exception=e),
+                                    request=request,
+                                    user_locale=best_match_language(request))
+
+
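+# Example (illustrative, not wired into any pipeline): a minimal Middleware
+# subclass using the process_request/process_response hooks above; the class
+# and header names are hypothetical.
+class ExampleHeaderMiddleware(Middleware):
+    def process_response(self, request, response):
+        # Stamp every response on its way back down the stack.
+        response.headers['X-Example-Processed'] = 'true'
+        return response
+
+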
+class Debug(Middleware):
+    """Helper class for debugging a WSGI application.
+
+    Can be inserted into any WSGI application chain to get information
+    about the request and response.
+
+    """
+
+    @webob.dec.wsgify()
+    def __call__(self, req):
+        if (not hasattr(LOG, 'isEnabledFor') or
+                LOG.isEnabledFor(logging.DEBUG)):
+            LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
+            for key, value in req.environ.items():
+                LOG.debug('%s = %s', key,
+                          strutils.mask_password(value))
+            LOG.debug('')
+            LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
+            for line in req.body_file:
+                LOG.debug('%s', strutils.mask_password(line))
+            LOG.debug('')
+
+        resp = req.get_response(self.application)
+        if (not hasattr(LOG, 'isEnabledFor') or
+                LOG.isEnabledFor(logging.DEBUG)):
+            LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
+            for (key, value) in six.iteritems(resp.headers):
+                LOG.debug('%s = %s', key, value)
+            LOG.debug('')
+
+        resp.app_iter = self.print_generator(resp.app_iter)
+
+        return resp
+
+    @staticmethod
+    def print_generator(app_iter):
+        """Iterator that prints the contents of a wrapper string."""
+        LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
+        for part in app_iter:
+            LOG.debug(part)
+            yield part
+
+
+class Router(object):
+    """WSGI middleware that maps incoming requests to WSGI apps."""
+
+    def __init__(self, mapper):
+        """Create a router for the given routes.Mapper.
+
+        Each route in `mapper` must specify a 'controller', which is a
+        WSGI app to call.  You'll probably want to specify an 'action' as
+        well and have your controller be an object that can route
+        the request to the action-specific method.
+
+        Examples:
+          mapper = routes.Mapper()
+          sc = ServerController()
+
+          # Explicit mapping of one route to a controller+action
+          mapper.connect(None, '/svrlist', controller=sc, action='list')
+
+          # Actions are all implicitly defined
+          mapper.resource('server', 'servers', controller=sc)
+
+          # Pointing to an arbitrary WSGI app.  You can specify the
+          # {path_info:.*} parameter so the target app can be handed just that
+          # section of the URL.
+          mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
+
+        """
+        self.map = mapper
+        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
+                                                          self.map)
+
+    @webob.dec.wsgify()
+    def __call__(self, req):
+        """Route the incoming request to a controller based on self.map.
+
+        If no match, return a 404.
+
+        """
+        return self._router
+
+    @staticmethod
+    @webob.dec.wsgify()
+    def _dispatch(req):
+        """Dispatch the request to the appropriate controller.
+
+        Called by self._router after matching the incoming request to a route
+        and putting the information into req.environ.  Either returns 404
+        or the routed WSGI app's response.
+
+        """
+        match = req.environ['wsgiorg.routing_args'][1]
+        if not match:
+            msg = _('The resource could not be found.')
+            return render_exception(exception.NotFound(msg),
+                                    request=req,
+                                    user_locale=best_match_language(req))
+        app = match['controller']
+        return app
+
+
+class ComposingRouter(Router):
+    def __init__(self, mapper=None, routers=None):
+        if mapper is None:
+            mapper = routes.Mapper()
+        if routers is None:
+            routers = []
+        for router in routers:
+            router.add_routes(mapper)
+        super(ComposingRouter, self).__init__(mapper)
+
+
+class ComposableRouter(Router):
+    """Router that supports use by ComposingRouter."""
+
+    def __init__(self, mapper=None):
+        if mapper is None:
+            mapper = routes.Mapper()
+        self.add_routes(mapper)
+        super(ComposableRouter, self).__init__(mapper)
+
+    def add_routes(self, mapper):
+        """Add routes to given mapper."""
+        pass
+
+
+class ExtensionRouter(Router):
+    """A router that allows extensions to supplement or overwrite routes.
+
+    Expects to be subclassed.
+    """
+    def __init__(self, application, mapper=None):
+        if mapper is None:
+            mapper = routes.Mapper()
+        self.application = application
+        self.add_routes(mapper)
+        mapper.connect('{path_info:.*}', controller=self.application)
+        super(ExtensionRouter, self).__init__(mapper)
+
+    def add_routes(self, mapper):
+        pass
+
+    @classmethod
+    def factory(cls, global_config, **local_config):
+        """Used for paste app factories in paste.deploy config files.
+
+        Any local configuration (that is, values under the [filter:APPNAME]
+        section of the paste config) will be passed into the `__init__` method
+        as kwargs.
+
+        A hypothetical configuration would look like:
+
+            [filter:analytics]
+            redis_host = 127.0.0.1
+            paste.filter_factory = keystone.analytics:Analytics.factory
+
+        which would result in a call to the `Analytics` class as
+
+            import keystone.analytics
+            keystone.analytics.Analytics(app, redis_host='127.0.0.1')
+
+        You could of course re-implement the `factory` method in subclasses,
+        but thanks to the kwarg passing it shouldn't be necessary.
+
+        """
+        def _factory(app):
+            # NOTE: only the local (per-filter) configuration is passed on.
+            return cls(app, **local_config)
+        return _factory
+
+
+class RoutersBase(object):
+    """Base class for Routers."""
+
+    def __init__(self):
+        self.v3_resources = []
+
+    def append_v3_routers(self, mapper, routers):
+        """Append v3 routers.
+
+        Subclasses should override this method to map their routes.
+
+        Use self._add_resource() to map routes for a resource.
+        """
+
+    def _add_resource(self, mapper, controller, path, rel,
+                      get_action=None, head_action=None, get_head_action=None,
+                      put_action=None, post_action=None, patch_action=None,
+                      delete_action=None, get_post_action=None,
+                      path_vars=None, status=None):
+        if get_head_action:
+            getattr(controller, get_head_action)  # ensure the attribute exists
+            mapper.connect(path, controller=controller, action=get_head_action,
+                           conditions=dict(method=['GET', 'HEAD']))
+        if get_action:
+            getattr(controller, get_action)  # ensure the attribute exists
+            mapper.connect(path, controller=controller, action=get_action,
+                           conditions=dict(method=['GET']))
+        if head_action:
+            getattr(controller, head_action)  # ensure the attribute exists
+            mapper.connect(path, controller=controller, action=head_action,
+                           conditions=dict(method=['HEAD']))
+        if put_action:
+            getattr(controller, put_action)  # ensure the attribute exists
+            mapper.connect(path, controller=controller, action=put_action,
+                           conditions=dict(method=['PUT']))
+        if post_action:
+            getattr(controller, post_action)  # ensure the attribute exists
+            mapper.connect(path, controller=controller, action=post_action,
+                           conditions=dict(method=['POST']))
+        if patch_action:
+            getattr(controller, patch_action)  # ensure the attribute exists
+            mapper.connect(path, controller=controller, action=patch_action,
+                           conditions=dict(method=['PATCH']))
+        if delete_action:
+            getattr(controller, delete_action)  # ensure the attribute exists
+            mapper.connect(path, controller=controller, action=delete_action,
+                           conditions=dict(method=['DELETE']))
+        if get_post_action:
+            getattr(controller, get_post_action)  # ensure the attribute exists
+            mapper.connect(path, controller=controller, action=get_post_action,
+                           conditions=dict(method=['GET', 'POST']))
+
+        resource_data = dict()
+
+        if path_vars:
+            resource_data['href-template'] = path
+            resource_data['href-vars'] = path_vars
+        else:
+            resource_data['href'] = path
+
+        if status:
+            if not json_home.Status.is_supported(status):
+                raise exception.Error(message=_(
+                    'Unexpected status requested for JSON Home response, %s') %
+                    status)
+            resource_data.setdefault('hints', {})
+            resource_data['hints']['status'] = status
+
+        self.v3_resources.append((rel, resource_data))
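+
+# Example (illustrative): how a subclass might map a resource; the
+# controller, paths, and relation values are hypothetical.
+#
+#     class WidgetRouters(RoutersBase):
+#         def append_v3_routers(self, mapper, routers):
+#             self._add_resource(
+#                 mapper, widget_controller,
+#                 path='/widgets/{widget_id}',
+#                 rel='widget',
+#                 get_action='get_widget',
+#                 delete_action='delete_widget',
+#                 path_vars={'widget_id': 'parameter-doc-url'})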
+
+
+class V3ExtensionRouter(ExtensionRouter, RoutersBase):
+    """Base class for V3 extension router."""
+
+    def __init__(self, application, mapper=None):
+        self.v3_resources = list()
+        super(V3ExtensionRouter, self).__init__(application, mapper)
+
+    def _update_version_response(self, response_data):
+        response_data['resources'].update(self.v3_resources)
+
+    @webob.dec.wsgify()
+    def __call__(self, request):
+        if request.path_info != '/':
+            # Not a request for version info so forward to super.
+            return super(V3ExtensionRouter, self).__call__(request)
+
+        response = request.get_response(self.application)
+
+        if response.status_code != 200:
+            # The request failed, so don't update the response.
+            return response
+
+        if response.headers['Content-Type'] != 'application/json-home':
+            # Not a request for JSON Home document, so don't update the
+            # response.
+            return response
+
+        response_data = jsonutils.loads(response.body)
+        self._update_version_response(response_data)
+        response.body = jsonutils.dumps(response_data,
+                                        cls=utils.SmarterEncoder)
+        return response
+
+
+def render_response(body=None, status=None, headers=None, method=None):
+    """Forms a WSGI response."""
+    if headers is None:
+        headers = []
+    else:
+        headers = list(headers)
+    headers.append(('Vary', 'X-Auth-Token'))
+
+    if body is None:
+        body = ''
+        status = status or (204, 'No Content')
+    else:
+        content_types = [v for h, v in headers if h == 'Content-Type']
+        if content_types:
+            content_type = content_types[0]
+        else:
+            content_type = None
+
+        JSON_ENCODE_CONTENT_TYPES = ('application/json',
+                                     'application/json-home',)
+        if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
+            body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
+            if content_type is None:
+                headers.append(('Content-Type', 'application/json'))
+        status = status or (200, 'OK')
+
+    resp = webob.Response(body=body,
+                          status='%s %s' % status,
+                          headerlist=headers)
+
+    if method == 'HEAD':
+        # NOTE(morganfainberg): HEAD requests should return the same status
+        # as a GET request and same headers (including content-type and
+        # content-length). The webob.Response object automatically changes
+        # content-length (and other headers) if the body is set to b''. Capture
+        # all headers and reset them on the response object after clearing the
+        # body. The body can only be set to a binary-type (not TextType or
+        # NoneType), so b'' is used here and should be compatible with
+        # both py2x and py3x.
+        stored_headers = resp.headers.copy()
+        resp.body = b''
+        for header, value in six.iteritems(stored_headers):
+            resp.headers[header] = value
+
+    return resp
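+
+# Example (illustrative): the common cases handled above.
+#
+#     render_response(body={'hello': 'world'})
+#     # -> 200 OK, Content-Type: application/json, Vary: X-Auth-Token
+#
+#     render_response()
+#     # -> 204 No Content with an empty body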
+
+
+def render_exception(error, context=None, request=None, user_locale=None):
+    """Forms a WSGI response based on the current error."""
+
+    error_message = error.args[0]
+    message = oslo_i18n.translate(error_message, desired_locale=user_locale)
+    if message is error_message:
+        # translate() didn't do anything because it wasn't a Message,
+        # convert to a string.
+        message = six.text_type(message)
+
+    body = {'error': {
+        'code': error.code,
+        'title': error.title,
+        'message': message,
+    }}
+    headers = []
+    if isinstance(error, exception.AuthPluginException):
+        body['error']['identity'] = error.authentication
+    elif isinstance(error, exception.Unauthorized):
+        url = CONF.public_endpoint
+        if not url:
+            if request:
+                context = {'host_url': request.host_url}
+            if context:
+                url = Application.base_url(context, 'public')
+            else:
+                url = 'http://localhost:%d' % CONF.eventlet_server.public_port
+        else:
+            substitutions = dict(
+                itertools.chain(six.iteritems(CONF),
+                                six.iteritems(CONF.eventlet_server)))
+            url = url % substitutions
+
+        headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
+    return render_response(status=(error.code, error.title),
+                           body=body,
+                           headers=headers)
diff --git a/keystone-moon/keystone/config.py b/keystone-moon/keystone/config.py
new file mode 100644 (file)
index 0000000..3d9a29f
--- /dev/null
@@ -0,0 +1,91 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Wrapper for keystone.common.config that configures itself on import."""
+
+import logging
+import os
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common import config
+from keystone import exception
+
+
+CONF = cfg.CONF
+
+setup_authentication = config.setup_authentication
+configure = config.configure
+
+
+def set_default_for_default_log_levels():
+    """Set the default for the default_log_levels option for keystone.
+
+    Keystone uses some packages that other OpenStack services don't use, and
+    those packages do logging. This sets the default level in
+    default_log_levels for those packages.
+
+    This function needs to be called before CONF().
+
+    """
+
+    extra_log_level_defaults = [
+        'dogpile=INFO',
+        'routes=INFO',
+        'keystone.common._memcache_pool=INFO',
+    ]
+
+    log.register_options(CONF)
+    CONF.default_log_levels.extend(extra_log_level_defaults)
+
+
+def setup_logging():
+    """Sets up logging for the keystone package."""
+    log.setup(CONF, 'keystone')
+    logging.captureWarnings(True)
+
+
+def find_paste_config():
+    """Find Keystone's paste.deploy configuration file.
+
+    Keystone's paste.deploy configuration file is specified in the
+    ``[paste_deploy]`` section of the main Keystone configuration file,
+    ``keystone.conf``.
+
+    For example::
+
+        [paste_deploy]
+        config_file = keystone-paste.ini
+
+    :returns: The selected configuration filename
+    :raises: exception.ConfigFileNotFound
+
+    """
+    if CONF.paste_deploy.config_file:
+        paste_config = CONF.paste_deploy.config_file
+        paste_config_value = paste_config
+        if not os.path.isabs(paste_config):
+            paste_config = CONF.find_file(paste_config)
+    elif CONF.config_file:
+        paste_config = CONF.config_file[0]
+        paste_config_value = paste_config
+    else:
+        # this provides backwards compatibility for keystone.conf files that
+        # still have the entire paste configuration included, rather than just
+        # a [paste_deploy] configuration section referring to an external file
+        paste_config = CONF.find_file('keystone.conf')
+        paste_config_value = 'keystone.conf'
+    if not paste_config or not os.path.exists(paste_config):
+        raise exception.ConfigFileNotFound(config_file=paste_config_value)
+    return paste_config
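+
+
+# Example (illustrative) startup order, following the docstrings above:
+#
+#     from keystone import config
+#     config.set_default_for_default_log_levels()  # must precede CONF()
+#     CONF(project='keystone')
+#     config.setup_logging()
+#     paste_file = config.find_paste_config()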
diff --git a/keystone-moon/keystone/contrib/__init__.py b/keystone-moon/keystone/contrib/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/admin_crud/__init__.py b/keystone-moon/keystone/contrib/admin_crud/__init__.py
new file mode 100644 (file)
index 0000000..d602092
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.admin_crud.core import *  # noqa
diff --git a/keystone-moon/keystone/contrib/admin_crud/core.py b/keystone-moon/keystone/contrib/admin_crud/core.py
new file mode 100644 (file)
index 0000000..5d69d24
--- /dev/null
@@ -0,0 +1,241 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone import assignment
+from keystone import catalog
+from keystone.common import extension
+from keystone.common import wsgi
+from keystone import identity
+from keystone import resource
+
+
+extension.register_admin_extension(
+    'OS-KSADM', {
+        'name': 'OpenStack Keystone Admin',
+        'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                     'OS-KSADM/v1.0',
+        'alias': 'OS-KSADM',
+        'updated': '2013-07-11T17:14:00-00:00',
+        'description': 'OpenStack extensions to Keystone v2.0 API '
+                       'enabling Administrative Operations.',
+        'links': [
+            {
+                'rel': 'describedby',
+                # TODO(dolph): link needs to be revised after
+                #              bug 928059 merges
+                'type': 'text/html',
+                'href': 'https://github.com/openstack/identity-api',
+            }
+        ]})
+
+
+class CrudExtension(wsgi.ExtensionRouter):
+    """Previously known as the OS-KSADM extension.
+
+    Provides a bunch of CRUD operations for internal data types.
+
+    """
+
+    def add_routes(self, mapper):
+        tenant_controller = resource.controllers.Tenant()
+        assignment_tenant_controller = (
+            assignment.controllers.TenantAssignment())
+        user_controller = identity.controllers.User()
+        role_controller = assignment.controllers.Role()
+        assignment_role_controller = assignment.controllers.RoleAssignmentV2()
+        service_controller = catalog.controllers.Service()
+        endpoint_controller = catalog.controllers.Endpoint()
+
+        # Tenant Operations
+        mapper.connect(
+            '/tenants',
+            controller=tenant_controller,
+            action='create_project',
+            conditions=dict(method=['POST']))
+        mapper.connect(
+            '/tenants/{tenant_id}',
+            controller=tenant_controller,
+            action='update_project',
+            conditions=dict(method=['PUT', 'POST']))
+        mapper.connect(
+            '/tenants/{tenant_id}',
+            controller=tenant_controller,
+            action='delete_project',
+            conditions=dict(method=['DELETE']))
+        mapper.connect(
+            '/tenants/{tenant_id}/users',
+            controller=assignment_tenant_controller,
+            action='get_project_users',
+            conditions=dict(method=['GET']))
+
+        # User Operations
+        mapper.connect(
+            '/users',
+            controller=user_controller,
+            action='get_users',
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            '/users',
+            controller=user_controller,
+            action='create_user',
+            conditions=dict(method=['POST']))
+        # NOTE(termie): not in diablo
+        mapper.connect(
+            '/users/{user_id}',
+            controller=user_controller,
+            action='update_user',
+            conditions=dict(method=['PUT']))
+        mapper.connect(
+            '/users/{user_id}',
+            controller=user_controller,
+            action='delete_user',
+            conditions=dict(method=['DELETE']))
+
+        # COMPAT(diablo): the copy with no OS-KSADM is from diablo
+        mapper.connect(
+            '/users/{user_id}/password',
+            controller=user_controller,
+            action='set_user_password',
+            conditions=dict(method=['PUT']))
+        mapper.connect(
+            '/users/{user_id}/OS-KSADM/password',
+            controller=user_controller,
+            action='set_user_password',
+            conditions=dict(method=['PUT']))
+
+        # COMPAT(diablo): the copy with no OS-KSADM is from diablo
+        mapper.connect(
+            '/users/{user_id}/tenant',
+            controller=user_controller,
+            action='update_user',
+            conditions=dict(method=['PUT']))
+        mapper.connect(
+            '/users/{user_id}/OS-KSADM/tenant',
+            controller=user_controller,
+            action='update_user',
+            conditions=dict(method=['PUT']))
+
+        # COMPAT(diablo): the copy with no OS-KSADM is from diablo
+        mapper.connect(
+            '/users/{user_id}/enabled',
+            controller=user_controller,
+            action='set_user_enabled',
+            conditions=dict(method=['PUT']))
+        mapper.connect(
+            '/users/{user_id}/OS-KSADM/enabled',
+            controller=user_controller,
+            action='set_user_enabled',
+            conditions=dict(method=['PUT']))
+
+        # User Roles
+        mapper.connect(
+            '/users/{user_id}/roles/OS-KSADM/{role_id}',
+            controller=assignment_role_controller,
+            action='add_role_to_user',
+            conditions=dict(method=['PUT']))
+        mapper.connect(
+            '/users/{user_id}/roles/OS-KSADM/{role_id}',
+            controller=assignment_role_controller,
+            action='remove_role_from_user',
+            conditions=dict(method=['DELETE']))
+
+        # COMPAT(diablo): User Roles
+        mapper.connect(
+            '/users/{user_id}/roleRefs',
+            controller=assignment_role_controller,
+            action='get_role_refs',
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            '/users/{user_id}/roleRefs',
+            controller=assignment_role_controller,
+            action='create_role_ref',
+            conditions=dict(method=['POST']))
+        mapper.connect(
+            '/users/{user_id}/roleRefs/{role_ref_id}',
+            controller=assignment_role_controller,
+            action='delete_role_ref',
+            conditions=dict(method=['DELETE']))
+
+        # User-Tenant Roles
+        mapper.connect(
+            '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}',
+            controller=assignment_role_controller,
+            action='add_role_to_user',
+            conditions=dict(method=['PUT']))
+        mapper.connect(
+            '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}',
+            controller=assignment_role_controller,
+            action='remove_role_from_user',
+            conditions=dict(method=['DELETE']))
+
+        # Service Operations
+        mapper.connect(
+            '/OS-KSADM/services',
+            controller=service_controller,
+            action='get_services',
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            '/OS-KSADM/services',
+            controller=service_controller,
+            action='create_service',
+            conditions=dict(method=['POST']))
+        mapper.connect(
+            '/OS-KSADM/services/{service_id}',
+            controller=service_controller,
+            action='delete_service',
+            conditions=dict(method=['DELETE']))
+        mapper.connect(
+            '/OS-KSADM/services/{service_id}',
+            controller=service_controller,
+            action='get_service',
+            conditions=dict(method=['GET']))
+
+        # Endpoint Templates
+        mapper.connect(
+            '/endpoints',
+            controller=endpoint_controller,
+            action='get_endpoints',
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            '/endpoints',
+            controller=endpoint_controller,
+            action='create_endpoint',
+            conditions=dict(method=['POST']))
+        mapper.connect(
+            '/endpoints/{endpoint_id}',
+            controller=endpoint_controller,
+            action='delete_endpoint',
+            conditions=dict(method=['DELETE']))
+
+        # Role Operations
+        mapper.connect(
+            '/OS-KSADM/roles',
+            controller=role_controller,
+            action='create_role',
+            conditions=dict(method=['POST']))
+        mapper.connect(
+            '/OS-KSADM/roles',
+            controller=role_controller,
+            action='get_roles',
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            '/OS-KSADM/roles/{role_id}',
+            controller=role_controller,
+            action='get_role',
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            '/OS-KSADM/roles/{role_id}',
+            controller=role_controller,
+            action='delete_role',
+            conditions=dict(method=['DELETE']))
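+
+        # NOTE: this router is typically enabled through keystone-paste.ini;
+        # a hypothetical snippet:
+        #
+        #     [filter:crud_extension]
+        #     paste.filter_factory =
+        #         keystone.contrib.admin_crud:CrudExtension.factory
+        #
+        # with 'crud_extension' then added to the admin pipeline.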
diff --git a/keystone-moon/keystone/contrib/ec2/__init__.py b/keystone-moon/keystone/contrib/ec2/__init__.py
new file mode 100644 (file)
index 0000000..88622e5
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.ec2 import controllers  # noqa
+from keystone.contrib.ec2.core import *  # noqa
+from keystone.contrib.ec2.routers import Ec2Extension  # noqa
+from keystone.contrib.ec2.routers import Ec2ExtensionV3  # noqa
diff --git a/keystone-moon/keystone/contrib/ec2/controllers.py b/keystone-moon/keystone/contrib/ec2/controllers.py
new file mode 100644 (file)
index 0000000..6e6d326
--- /dev/null
@@ -0,0 +1,415 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the EC2 Credentials service.
+
+This service allows the creation of access/secret credentials used for
+the ec2 interop layer of OpenStack.
+
+A user can create as many access/secret pairs as needed, each of which maps
+to a specific project.  This is required because OpenStack supports a user
+belonging to multiple projects, whereas the signatures created on ec2-style
+requests don't allow specification of which project the user wishes to act
+upon.
+
+To complete the cycle, we provide a method that OpenStack services can
+use to validate a signature and get a corresponding OpenStack token.  This
+token allows method calls to other services within the context in which the
+access/secret was created.  As an example, Nova requests Keystone to validate
+the signature of a request, receives a token, and then makes a request to
+Glance to list images needed to perform the requested task.
+
+"""
+
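+# Flow sketch (illustrative), following the description above:
+#
+#     1. a client signs an EC2-style request with its secret key
+#     2. a service such as Nova forwards the signature and request
+#        parameters to Keystone
+#     3. Keystone regenerates the signature from the stored secret and
+#        compares it (see Ec2ControllerCommon.check_signature below)
+#     4. on a match, a token scoped to the credential's project is returned
+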
+import abc
+import sys
+import uuid
+
+from keystoneclient.contrib.ec2 import utils as ec2_utils
+from oslo_serialization import jsonutils
+import six
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import utils
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import token_model
+
+
+@dependency.requires('assignment_api', 'catalog_api', 'credential_api',
+                     'identity_api', 'resource_api', 'role_api',
+                     'token_provider_api')
+@six.add_metaclass(abc.ABCMeta)
+class Ec2ControllerCommon(object):
+    def check_signature(self, creds_ref, credentials):
+        signer = ec2_utils.Ec2Signer(creds_ref['secret'])
+        signature = signer.generate(credentials)
+        if utils.auth_str_equal(credentials['signature'], signature):
+            return
+        # NOTE(vish): Some libraries don't use the port when signing
+        #             requests, so try again without port.
+        elif ':' in credentials['host']:
+            hostname, _port = credentials['host'].split(':')
+            credentials['host'] = hostname
+            signature = signer.generate(credentials)
+            if not utils.auth_str_equal(credentials['signature'], signature):
+                raise exception.Unauthorized(message='Invalid EC2 signature.')
+        else:
+            raise exception.Unauthorized(message='EC2 signature not supplied.')
+
+    @abc.abstractmethod
+    def authenticate(self, context, credentials=None, ec2Credentials=None):
+        """Validate a signed EC2 request and provide a token.
+
+        Other services (such as Nova) use this **admin** call to determine
+        if a signed request they have received is from a valid user.
+
+        If it is a valid signature, an OpenStack token that maps
+        to the user/tenant is returned to the caller, along with
+        all the other details returned from a normal token validation
+        call.
+
+        The returned token is useful for making calls to other
+        OpenStack services within the context of the request.
+
+        :param context: standard context
+        :param credentials: dict of ec2 signature
+        :param ec2Credentials: DEPRECATED dict of ec2 signature
+        :returns: token: OpenStack token equivalent to access key along
+                         with the corresponding service catalog and roles
+        """
+        raise exception.NotImplemented()
+
+    def _authenticate(self, credentials=None, ec2credentials=None):
+        """Common code shared between the V2 and V3 authenticate methods.
+
+        :returns: user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref
+        """
+
+        # FIXME(ja): validate that a service token was used!
+
+        # NOTE(termie): backwards compat hack
+        if not credentials and ec2credentials:
+            credentials = ec2credentials
+
+        if 'access' not in credentials:
+            raise exception.Unauthorized(message='EC2 signature not supplied.')
+
+        creds_ref = self._get_credentials(credentials['access'])
+        self.check_signature(creds_ref, credentials)
+
+        # TODO(termie): don't create new tokens every time
+        # TODO(termie): this is copied from TokenController.authenticate
+        tenant_ref = self.resource_api.get_project(creds_ref['tenant_id'])
+        user_ref = self.identity_api.get_user(creds_ref['user_id'])
+        metadata_ref = {}
+        metadata_ref['roles'] = (
+            self.assignment_api.get_roles_for_user_and_project(
+                user_ref['id'], tenant_ref['id']))
+
+        trust_id = creds_ref.get('trust_id')
+        if trust_id:
+            metadata_ref['trust_id'] = trust_id
+            metadata_ref['trustee_user_id'] = user_ref['id']
+
+        # Validate that the auth info is valid and nothing is disabled
+        try:
+            self.identity_api.assert_user_enabled(
+                user_id=user_ref['id'], user=user_ref)
+            self.resource_api.assert_domain_enabled(
+                domain_id=user_ref['domain_id'])
+            self.resource_api.assert_project_enabled(
+                project_id=tenant_ref['id'], project=tenant_ref)
+        except AssertionError as e:
+            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
+                        sys.exc_info()[2])
+
+        roles = metadata_ref.get('roles', [])
+        if not roles:
+            raise exception.Unauthorized(message='User not valid for tenant.')
+        roles_ref = [self.role_api.get_role(role_id) for role_id in roles]
+
+        catalog_ref = self.catalog_api.get_catalog(
+            user_ref['id'], tenant_ref['id'])
+
+        return user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref
+
+    def create_credential(self, context, user_id, tenant_id):
+        """Create a secret/access pair for use with ec2 style auth.
+
+        Generates a new set of credentials that map the user/tenant
+        pair.
+
+        :param context: standard context
+        :param user_id: id of user
+        :param tenant_id: id of tenant
+        :returns: credential: dict of ec2 credential
+        """
+
+        self.identity_api.get_user(user_id)
+        self.resource_api.get_project(tenant_id)
+        trust_id = self._get_trust_id_for_request(context)
+        blob = {'access': uuid.uuid4().hex,
+                'secret': uuid.uuid4().hex,
+                'trust_id': trust_id}
+        credential_id = utils.hash_access_key(blob['access'])
+        cred_ref = {'user_id': user_id,
+                    'project_id': tenant_id,
+                    'blob': jsonutils.dumps(blob),
+                    'id': credential_id,
+                    'type': 'ec2'}
+        self.credential_api.create_credential(credential_id, cred_ref)
+        return {'credential': self._convert_v3_to_ec2_credential(cred_ref)}
+
+    def get_credentials(self, user_id):
+        """List all credentials for a user.
+
+        :param user_id: id of user
+        :returns: credentials: list of ec2 credential dicts
+        """
+
+        self.identity_api.get_user(user_id)
+        credential_refs = self.credential_api.list_credentials_for_user(
+            user_id)
+        return {'credentials':
+                [self._convert_v3_to_ec2_credential(credential)
+                    for credential in credential_refs]}
+
+    def get_credential(self, user_id, credential_id):
+        """Retrieve a user's access/secret pair by the access key.
+
+        Grab the full access/secret pair for a given access key.
+
+        :param user_id: id of user
+        :param credential_id: access key for credentials
+        :returns: credential: dict of ec2 credential
+        """
+
+        self.identity_api.get_user(user_id)
+        return {'credential': self._get_credentials(credential_id)}
+
+    def delete_credential(self, user_id, credential_id):
+        """Delete a user's access/secret pair.
+
+        Used to revoke a user's access/secret pair.
+
+        :param user_id: id of user
+        :param credential_id: access key for credentials
+        :returns: bool: success
+        """
+
+        self.identity_api.get_user(user_id)
+        self._get_credentials(credential_id)
+        ec2_credential_id = utils.hash_access_key(credential_id)
+        return self.credential_api.delete_credential(ec2_credential_id)
+
+    @staticmethod
+    def _convert_v3_to_ec2_credential(credential):
+        # Prior to the fix for bug #1259584, the blob was stored
+        # unserialized, but it should be stored as a JSON string for
+        # compatibility with the v3 credentials API. Fall back to the
+        # old behavior for backwards compatibility with existing DB
+        # contents.
+        try:
+            blob = jsonutils.loads(credential['blob'])
+        except TypeError:
+            blob = credential['blob']
+        return {'user_id': credential.get('user_id'),
+                'tenant_id': credential.get('project_id'),
+                'access': blob.get('access'),
+                'secret': blob.get('secret'),
+                'trust_id': blob.get('trust_id')}
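+
+    # A worked example of the conversion above (values hypothetical): a v3
+    # credential row such as
+    #   {'user_id': 'u1', 'project_id': 'p1',
+    #    'blob': '{"access": "a", "secret": "s", "trust_id": null}'}
+    # becomes the flattened EC2 view
+    #   {'user_id': 'u1', 'tenant_id': 'p1',
+    #    'access': 'a', 'secret': 's', 'trust_id': None}
+    # with the TypeError fallback covering pre-fix rows whose blob is
+    # already a dict rather than a JSON string.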
+
+    def _get_credentials(self, credential_id):
+        """Return credentials from an ID.
+
+        :param credential_id: id of credential
+        :raises exception.Unauthorized: when credential id is invalid
+        :returns: credential: dict of ec2 credential.
+        """
+        ec2_credential_id = utils.hash_access_key(credential_id)
+        creds = self.credential_api.get_credential(ec2_credential_id)
+        if not creds:
+            raise exception.Unauthorized(message='EC2 access key not found.')
+        return self._convert_v3_to_ec2_credential(creds)
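+
+    # A minimal sketch of the lookup scheme above, assuming
+    # utils.hash_access_key is a SHA-256 hex digest of the access key:
+    #
+    #   credential_id = utils.hash_access_key(credentials['access'])
+    #   creds = self.credential_api.get_credential(credential_id)
+    #
+    # i.e. the stored row id is derived from the access key, so possession
+    # of the access key is enough to locate (but not validate) the
+    # credential; the signature check happens separately in _authenticate.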
+
+
+@dependency.requires('policy_api', 'token_provider_api')
+class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):
+
+    @controller.v2_deprecated
+    def authenticate(self, context, credentials=None, ec2Credentials=None):
+        (user_ref, tenant_ref, metadata_ref, roles_ref,
+         catalog_ref) = self._authenticate(credentials=credentials,
+                                           ec2credentials=ec2Credentials)
+
+        # NOTE(morganfainberg): Make sure the data is in the correct form
+        # since it might be consumed externally to Keystone and this is a
+        # v2.0 controller. The token provider does not explicitly care about
+        # the user_ref version in this case, but the data is stored in the
+        # token itself and should match the version.
+        user_ref = self.v3_to_v2_user(user_ref)
+        auth_token_data = dict(user=user_ref,
+                               tenant=tenant_ref,
+                               metadata=metadata_ref,
+                               id='placeholder')
+        (token_id, token_data) = self.token_provider_api.issue_v2_token(
+            auth_token_data, roles_ref, catalog_ref)
+        return token_data
+
+    @controller.v2_deprecated
+    def get_credential(self, context, user_id, credential_id):
+        if not self._is_admin(context):
+            self._assert_identity(context, user_id)
+        return super(Ec2Controller, self).get_credential(user_id,
+                                                         credential_id)
+
+    @controller.v2_deprecated
+    def get_credentials(self, context, user_id):
+        if not self._is_admin(context):
+            self._assert_identity(context, user_id)
+        return super(Ec2Controller, self).get_credentials(user_id)
+
+    @controller.v2_deprecated
+    def create_credential(self, context, user_id, tenant_id):
+        if not self._is_admin(context):
+            self._assert_identity(context, user_id)
+        return super(Ec2Controller, self).create_credential(context, user_id,
+                                                            tenant_id)
+
+    @controller.v2_deprecated
+    def delete_credential(self, context, user_id, credential_id):
+        if not self._is_admin(context):
+            self._assert_identity(context, user_id)
+            self._assert_owner(user_id, credential_id)
+        return super(Ec2Controller, self).delete_credential(user_id,
+                                                            credential_id)
+
+    def _assert_identity(self, context, user_id):
+        """Check that the provided token belongs to the user.
+
+        :param context: standard context
+        :param user_id: id of user
+        :raises exception.Forbidden: when token is invalid
+
+        """
+        try:
+            token_data = self.token_provider_api.validate_token(
+                context['token_id'])
+        except exception.TokenNotFound as e:
+            raise exception.Unauthorized(e)
+
+        token_ref = token_model.KeystoneToken(token_id=context['token_id'],
+                                              token_data=token_data)
+
+        if token_ref.user_id != user_id:
+            raise exception.Forbidden(_('Token belongs to another user'))
+
+    def _is_admin(self, context):
+        """Wrap admin assertion error return statement.
+
+        :param context: standard context
+        :returns: bool: success
+
+        """
+        try:
+            # NOTE(morganfainberg): policy_api is required for assert_admin
+            # to properly perform policy enforcement.
+            self.assert_admin(context)
+            return True
+        except exception.Forbidden:
+            return False
+
+    def _assert_owner(self, user_id, credential_id):
+        """Ensure the provided user owns the credential.
+
+        :param user_id: expected credential owner
+        :param credential_id: id of credential object
+        :raises exception.Forbidden: on failure
+
+        """
+        ec2_credential_id = utils.hash_access_key(credential_id)
+        cred_ref = self.credential_api.get_credential(ec2_credential_id)
+        if user_id != cred_ref['user_id']:
+            raise exception.Forbidden(_('Credential belongs to another user'))
+
+
+@dependency.requires('policy_api', 'token_provider_api')
+class Ec2ControllerV3(Ec2ControllerCommon, controller.V3Controller):
+
+    member_name = 'project'
+
+    def __init__(self):
+        super(Ec2ControllerV3, self).__init__()
+        self.get_member_from_driver = self.credential_api.get_credential
+
+    def _check_credential_owner_and_user_id_match(self, context, prep_info,
+                                                  user_id, credential_id):
+        # NOTE(morganfainberg): this method needs to capture the arguments of
+        # the method that is decorated with @controller.protected() (with the
+        # exception of the first argument, 'context'), since the protected
+        # method passes in *args, **kwargs. In this case, it is easier to see
+        # the expected input if the argspec lists `user_id` and
+        # `credential_id` explicitly (matching the
+        # :meth:`.ec2_delete_credential` method below).
+        ref = {}
+        credential_id = utils.hash_access_key(credential_id)
+        ref['credential'] = self.credential_api.get_credential(credential_id)
+        # NOTE(morganfainberg): policy_api is required for this
+        # check_protection to properly be able to perform policy enforcement.
+        self.check_protection(context, prep_info, ref)
+
+    def authenticate(self, context, credentials=None, ec2Credentials=None):
+        (user_ref, project_ref, metadata_ref, roles_ref,
+         catalog_ref) = self._authenticate(credentials=credentials,
+                                           ec2credentials=ec2Credentials)
+
+        method_names = ['ec2credential']
+
+        token_id, token_data = self.token_provider_api.issue_v3_token(
+            user_ref['id'], method_names, project_id=project_ref['id'],
+            metadata_ref=metadata_ref)
+        return render_token_data_response(token_id, token_data)
+
+    @controller.protected(callback=_check_credential_owner_and_user_id_match)
+    def ec2_get_credential(self, context, user_id, credential_id):
+        return super(Ec2ControllerV3, self).get_credential(user_id,
+                                                           credential_id)
+
+    @controller.protected()
+    def ec2_list_credentials(self, context, user_id):
+        return super(Ec2ControllerV3, self).get_credentials(user_id)
+
+    @controller.protected()
+    def ec2_create_credential(self, context, user_id, tenant_id):
+        return super(Ec2ControllerV3, self).create_credential(context,
+                                                              user_id,
+                                                              tenant_id)
+
+    @controller.protected(callback=_check_credential_owner_and_user_id_match)
+    def ec2_delete_credential(self, context, user_id, credential_id):
+        return super(Ec2ControllerV3, self).delete_credential(user_id,
+                                                              credential_id)
+
+
+def render_token_data_response(token_id, token_data):
+    """Render token data HTTP response.
+
+    Stash token ID into the X-Subject-Token header.
+
+    """
+    headers = [('X-Subject-Token', token_id)]
+
+    return wsgi.render_response(body=token_data,
+                                status=(200, 'OK'), headers=headers)
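+
+# A rough sketch of the resulting HTTP exchange (token values
+# hypothetical): a POST to /v3/ec2tokens with a signed EC2 request
+# returns the token body as JSON while the token ID travels in the
+# header, mirroring the core v3 authentication API:
+#
+#   HTTP/1.1 200 OK
+#   X-Subject-Token: gAAAAABh...
+#   {"token": {"methods": ["ec2credential"], ...}}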
diff --git a/keystone-moon/keystone/contrib/ec2/core.py b/keystone-moon/keystone/contrib/ec2/core.py
new file mode 100644 (file)
index 0000000..77857af
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import extension
+
+
+EXTENSION_DATA = {
+    'name': 'OpenStack EC2 API',
+    'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                 'OS-EC2/v1.0',
+    'alias': 'OS-EC2',
+    'updated': '2013-07-07T12:00:00-00:00',
+    'description': 'OpenStack EC2 Credentials backend.',
+    'links': [
+        {
+            'rel': 'describedby',
+            # TODO(ayoung): needs a description
+            'type': 'text/html',
+            'href': 'https://github.com/openstack/identity-api',
+        }
+    ]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
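+
+# Once registered, the extension is advertised by the v2.0 extensions
+# resource; a rough sketch of the expected entry (abbreviated):
+#
+#   GET /v2.0/extensions  ->  {"extensions": {"values": [
+#       {"alias": "OS-EC2", "name": "OpenStack EC2 API", ...}]}}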
diff --git a/keystone-moon/keystone/contrib/ec2/routers.py b/keystone-moon/keystone/contrib/ec2/routers.py
new file mode 100644 (file)
index 0000000..7b6bf11
--- /dev/null
@@ -0,0 +1,95 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.contrib.ec2 import controllers
+
+
+build_resource_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation, extension_name='OS-EC2',
+    extension_version='1.0')
+
+build_parameter_relation = functools.partial(
+    json_home.build_v3_extension_parameter_relation, extension_name='OS-EC2',
+    extension_version='1.0')
+
+
+class Ec2Extension(wsgi.ExtensionRouter):
+    def add_routes(self, mapper):
+        ec2_controller = controllers.Ec2Controller()
+        # validation
+        mapper.connect(
+            '/ec2tokens',
+            controller=ec2_controller,
+            action='authenticate',
+            conditions=dict(method=['POST']))
+
+        # crud
+        mapper.connect(
+            '/users/{user_id}/credentials/OS-EC2',
+            controller=ec2_controller,
+            action='create_credential',
+            conditions=dict(method=['POST']))
+        mapper.connect(
+            '/users/{user_id}/credentials/OS-EC2',
+            controller=ec2_controller,
+            action='get_credentials',
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            '/users/{user_id}/credentials/OS-EC2/{credential_id}',
+            controller=ec2_controller,
+            action='get_credential',
+            conditions=dict(method=['GET']))
+        mapper.connect(
+            '/users/{user_id}/credentials/OS-EC2/{credential_id}',
+            controller=ec2_controller,
+            action='delete_credential',
+            conditions=dict(method=['DELETE']))
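+
+        # Route summary for the v2 extension above (informal sketch):
+        #   POST   /ec2tokens                           -> authenticate
+        #   POST   /users/{user_id}/credentials/OS-EC2  -> create_credential
+        #   GET    /users/{user_id}/credentials/OS-EC2  -> get_credentials
+        #   GET    .../OS-EC2/{credential_id}           -> get_credential
+        #   DELETE .../OS-EC2/{credential_id}           -> delete_credential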
+
+
+class Ec2ExtensionV3(wsgi.V3ExtensionRouter):
+
+    def add_routes(self, mapper):
+        ec2_controller = controllers.Ec2ControllerV3()
+        # validation
+        self._add_resource(
+            mapper, ec2_controller,
+            path='/ec2tokens',
+            post_action='authenticate',
+            rel=build_resource_relation(resource_name='ec2tokens'))
+
+        # crud
+        self._add_resource(
+            mapper, ec2_controller,
+            path='/users/{user_id}/credentials/OS-EC2',
+            get_action='ec2_list_credentials',
+            post_action='ec2_create_credential',
+            rel=build_resource_relation(resource_name='user_credentials'),
+            path_vars={
+                'user_id': json_home.Parameters.USER_ID,
+            })
+        self._add_resource(
+            mapper, ec2_controller,
+            path='/users/{user_id}/credentials/OS-EC2/{credential_id}',
+            get_action='ec2_get_credential',
+            delete_action='ec2_delete_credential',
+            rel=build_resource_relation(resource_name='user_credential'),
+            path_vars={
+                'credential_id':
+                build_parameter_relation(parameter_name='credential_id'),
+                'user_id': json_home.Parameters.USER_ID,
+            })
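+
+# The rel values built above surface in keystone's JSON Home document used
+# for version discovery; assuming the standard keystone layout, they take
+# the form
+#   http://docs.openstack.org/api/openstack-identity/3/ext/OS-EC2/1.0/rel/ec2tokens
+# so clients can locate /ec2tokens without hard-coding the path.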
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/__init__.py b/keystone-moon/keystone/contrib/endpoint_filter/__init__.py
new file mode 100644 (file)
index 0000000..72508c3
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.endpoint_filter.core import *  # noqa
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/backends/__init__.py b/keystone-moon/keystone/contrib/endpoint_filter/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py b/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py
new file mode 100644 (file)
index 0000000..6ac3c1c
--- /dev/null
@@ -0,0 +1,76 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import six
+
+from keystone.catalog.backends import sql
+from keystone.catalog import core as catalog_core
+from keystone.common import dependency
+from keystone import exception
+
+CONF = cfg.CONF
+
+
+@dependency.requires('endpoint_filter_api')
+class EndpointFilterCatalog(sql.Catalog):
+    def get_v3_catalog(self, user_id, project_id):
+        substitutions = dict(six.iteritems(CONF))
+        substitutions.update({'tenant_id': project_id, 'user_id': user_id})
+
+        services = {}
+
+        refs = self.endpoint_filter_api.list_endpoints_for_project(project_id)
+
+        if (not refs and
+                CONF.endpoint_filter.return_all_endpoints_if_no_filter):
+            return super(EndpointFilterCatalog, self).get_v3_catalog(
+                user_id, project_id)
+
+        for entry in refs:
+            try:
+                endpoint = self.get_endpoint(entry['endpoint_id'])
+                if not endpoint['enabled']:
+                    # Skip disabled endpoints.
+                    continue
+                service_id = endpoint['service_id']
+                services.setdefault(
+                    service_id,
+                    self.get_service(service_id))
+                service = services[service_id]
+                del endpoint['service_id']
+                del endpoint['enabled']
+                del endpoint['legacy_endpoint_id']
+                endpoint['url'] = catalog_core.format_url(
+                    endpoint['url'], substitutions)
+                # populate filtered endpoints
+                if 'endpoints' in services[service_id]:
+                    service['endpoints'].append(endpoint)
+                else:
+                    service['endpoints'] = [endpoint]
+            except exception.EndpointNotFound:
+                # remove bad reference from association
+                self.endpoint_filter_api.remove_endpoint_from_project(
+                    entry['endpoint_id'], project_id)
+
+        # format catalog
+        catalog = []
+        for service_id, service in six.iteritems(services):
+            formatted_service = {}
+            formatted_service['id'] = service['id']
+            formatted_service['type'] = service['type']
+            formatted_service['endpoints'] = service['endpoints']
+            catalog.append(formatted_service)
+
+        return catalog
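+
+# A sketch of the shape returned above (abbreviated, IDs hypothetical),
+# matching the v3 catalog format but restricted to the project's
+# associated endpoints:
+#
+#   [{'id': 's1', 'type': 'identity',
+#     'endpoints': [{'id': 'e1', 'interface': 'public',
+#                    'url': 'http://host:5000/v3'}]}]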
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py b/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py
new file mode 100644 (file)
index 0000000..a998423
--- /dev/null
@@ -0,0 +1,224 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _
+
+
+class ProjectEndpoint(sql.ModelBase, sql.ModelDictMixin):
+    """project-endpoint relationship table."""
+    __tablename__ = 'project_endpoint'
+    attributes = ['endpoint_id', 'project_id']
+    endpoint_id = sql.Column(sql.String(64),
+                             primary_key=True,
+                             nullable=False)
+    project_id = sql.Column(sql.String(64),
+                            primary_key=True,
+                            nullable=False)
+
+
+class EndpointGroup(sql.ModelBase, sql.ModelDictMixin):
+    """Endpoint Groups table."""
+    __tablename__ = 'endpoint_group'
+    attributes = ['id', 'name', 'description', 'filters']
+    mutable_attributes = frozenset(['name', 'description', 'filters'])
+    id = sql.Column(sql.String(64), primary_key=True)
+    name = sql.Column(sql.String(255), nullable=False)
+    description = sql.Column(sql.Text, nullable=True)
+    filters = sql.Column(sql.JsonBlob(), nullable=False)
+
+
+class ProjectEndpointGroupMembership(sql.ModelBase, sql.ModelDictMixin):
+    """Project to Endpoint group relationship table."""
+    __tablename__ = 'project_endpoint_group'
+    attributes = ['endpoint_group_id', 'project_id']
+    endpoint_group_id = sql.Column(sql.String(64),
+                                   sql.ForeignKey('endpoint_group.id'),
+                                   nullable=False)
+    project_id = sql.Column(sql.String(64), nullable=False)
+    __table_args__ = (sql.PrimaryKeyConstraint('endpoint_group_id',
+                                               'project_id'), {})
+
+
+class EndpointFilter(object):
+
+    @sql.handle_conflicts(conflict_type='project_endpoint')
+    def add_endpoint_to_project(self, endpoint_id, project_id):
+        session = sql.get_session()
+        with session.begin():
+            endpoint_filter_ref = ProjectEndpoint(endpoint_id=endpoint_id,
+                                                  project_id=project_id)
+            session.add(endpoint_filter_ref)
+
+    def _get_project_endpoint_ref(self, session, endpoint_id, project_id):
+        endpoint_filter_ref = session.query(ProjectEndpoint).get(
+            (endpoint_id, project_id))
+        if endpoint_filter_ref is None:
+            msg = _('Endpoint %(endpoint_id)s not found in project '
+                    '%(project_id)s') % {'endpoint_id': endpoint_id,
+                                         'project_id': project_id}
+            raise exception.NotFound(msg)
+        return endpoint_filter_ref
+
+    def check_endpoint_in_project(self, endpoint_id, project_id):
+        session = sql.get_session()
+        self._get_project_endpoint_ref(session, endpoint_id, project_id)
+
+    def remove_endpoint_from_project(self, endpoint_id, project_id):
+        session = sql.get_session()
+        endpoint_filter_ref = self._get_project_endpoint_ref(
+            session, endpoint_id, project_id)
+        with session.begin():
+            session.delete(endpoint_filter_ref)
+
+    def list_endpoints_for_project(self, project_id):
+        session = sql.get_session()
+        query = session.query(ProjectEndpoint)
+        query = query.filter_by(project_id=project_id)
+        endpoint_filter_refs = query.all()
+        return [ref.to_dict() for ref in endpoint_filter_refs]
+
+    def list_projects_for_endpoint(self, endpoint_id):
+        session = sql.get_session()
+        query = session.query(ProjectEndpoint)
+        query = query.filter_by(endpoint_id=endpoint_id)
+        endpoint_filter_refs = query.all()
+        return [ref.to_dict() for ref in endpoint_filter_refs]
+
+    def delete_association_by_endpoint(self, endpoint_id):
+        session = sql.get_session()
+        with session.begin():
+            query = session.query(ProjectEndpoint)
+            query = query.filter_by(endpoint_id=endpoint_id)
+            query.delete(synchronize_session=False)
+
+    def delete_association_by_project(self, project_id):
+        session = sql.get_session()
+        with session.begin():
+            query = session.query(ProjectEndpoint)
+            query = query.filter_by(project_id=project_id)
+            query.delete(synchronize_session=False)
+
+    def create_endpoint_group(self, endpoint_group_id, endpoint_group):
+        session = sql.get_session()
+        with session.begin():
+            endpoint_group_ref = EndpointGroup.from_dict(endpoint_group)
+            session.add(endpoint_group_ref)
+        return endpoint_group_ref.to_dict()
+
+    def _get_endpoint_group(self, session, endpoint_group_id):
+        endpoint_group_ref = session.query(EndpointGroup).get(
+            endpoint_group_id)
+        if endpoint_group_ref is None:
+            raise exception.EndpointGroupNotFound(
+                endpoint_group_id=endpoint_group_id)
+        return endpoint_group_ref
+
+    def get_endpoint_group(self, endpoint_group_id):
+        session = sql.get_session()
+        endpoint_group_ref = self._get_endpoint_group(session,
+                                                      endpoint_group_id)
+        return endpoint_group_ref.to_dict()
+
+    def update_endpoint_group(self, endpoint_group_id, endpoint_group):
+        session = sql.get_session()
+        with session.begin():
+            endpoint_group_ref = self._get_endpoint_group(session,
+                                                          endpoint_group_id)
+            old_endpoint_group = endpoint_group_ref.to_dict()
+            old_endpoint_group.update(endpoint_group)
+            new_endpoint_group = EndpointGroup.from_dict(old_endpoint_group)
+            for attr in EndpointGroup.mutable_attributes:
+                setattr(endpoint_group_ref, attr,
+                        getattr(new_endpoint_group, attr))
+        return endpoint_group_ref.to_dict()
+
+    def delete_endpoint_group(self, endpoint_group_id):
+        session = sql.get_session()
+        endpoint_group_ref = self._get_endpoint_group(session,
+                                                      endpoint_group_id)
+        with session.begin():
+            session.delete(endpoint_group_ref)
+            self._delete_endpoint_group_association_by_endpoint_group(
+                session, endpoint_group_id)
+
+    def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
+        session = sql.get_session()
+        ref = self._get_endpoint_group_in_project(session,
+                                                  endpoint_group_id,
+                                                  project_id)
+        return ref.to_dict()
+
+    @sql.handle_conflicts(conflict_type='project_endpoint_group')
+    def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
+        session = sql.get_session()
+
+        with session.begin():
+            # Create a new Project Endpoint group entity
+            endpoint_group_project_ref = ProjectEndpointGroupMembership(
+                endpoint_group_id=endpoint_group_id, project_id=project_id)
+            session.add(endpoint_group_project_ref)
+
+    def _get_endpoint_group_in_project(self, session,
+                                       endpoint_group_id, project_id):
+        endpoint_group_project_ref = session.query(
+            ProjectEndpointGroupMembership).get((endpoint_group_id,
+                                                 project_id))
+        if endpoint_group_project_ref is None:
+            msg = _('Endpoint Group Project Association not found')
+            raise exception.NotFound(msg)
+        else:
+            return endpoint_group_project_ref
+
+    def list_endpoint_groups(self):
+        session = sql.get_session()
+        query = session.query(EndpointGroup)
+        endpoint_group_refs = query.all()
+        return [e.to_dict() for e in endpoint_group_refs]
+
+    def list_endpoint_groups_for_project(self, project_id):
+        session = sql.get_session()
+        query = session.query(ProjectEndpointGroupMembership)
+        query = query.filter_by(project_id=project_id)
+        endpoint_group_refs = query.all()
+        return [ref.to_dict() for ref in endpoint_group_refs]
+
+    def remove_endpoint_group_from_project(self, endpoint_group_id,
+                                           project_id):
+        session = sql.get_session()
+        endpoint_group_project_ref = self._get_endpoint_group_in_project(
+            session, endpoint_group_id, project_id)
+        with session.begin():
+            session.delete(endpoint_group_project_ref)
+
+    def list_projects_associated_with_endpoint_group(self, endpoint_group_id):
+        session = sql.get_session()
+        query = session.query(ProjectEndpointGroupMembership)
+        query = query.filter_by(endpoint_group_id=endpoint_group_id)
+        endpoint_group_refs = query.all()
+        return [ref.to_dict() for ref in endpoint_group_refs]
+
+    def _delete_endpoint_group_association_by_endpoint_group(
+            self, session, endpoint_group_id):
+        query = session.query(ProjectEndpointGroupMembership)
+        query = query.filter_by(endpoint_group_id=endpoint_group_id)
+        query.delete()
+
+    def delete_endpoint_group_association_by_project(self, project_id):
+        session = sql.get_session()
+        with session.begin():
+            query = session.query(ProjectEndpointGroupMembership)
+            query = query.filter_by(project_id=project_id)
+            query.delete()
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/controllers.py b/keystone-moon/keystone/contrib/endpoint_filter/controllers.py
new file mode 100644 (file)
index 0000000..dc4ef7a
--- /dev/null
@@ -0,0 +1,300 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from keystone.catalog import controllers as catalog_controllers
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import validation
+from keystone.contrib.endpoint_filter import schema
+from keystone import exception
+from keystone import notifications
+from keystone import resource
+
+
+@dependency.requires('catalog_api', 'endpoint_filter_api', 'resource_api')
+class _ControllerBase(controller.V3Controller):
+    """Base behaviors for endpoint filter controllers."""
+
+    def _get_endpoint_groups_for_project(self, project_id):
+        # recover the project endpoint group memberships and for each
+        # membership recover the endpoint group
+        self.resource_api.get_project(project_id)
+        try:
+            refs = self.endpoint_filter_api.list_endpoint_groups_for_project(
+                project_id)
+            endpoint_groups = [self.endpoint_filter_api.get_endpoint_group(
+                ref['endpoint_group_id']) for ref in refs]
+            return endpoint_groups
+        except exception.EndpointGroupNotFound:
+            return []
+
+    def _get_endpoints_filtered_by_endpoint_group(self, endpoint_group_id):
+        endpoints = self.catalog_api.list_endpoints()
+        filters = self.endpoint_filter_api.get_endpoint_group(
+            endpoint_group_id)['filters']
+        filtered_endpoints = []
+
+        for endpoint in endpoints:
+            is_candidate = True
+            for key, value in six.iteritems(filters):
+                if endpoint[key] != value:
+                    is_candidate = False
+                    break
+            if is_candidate:
+                filtered_endpoints.append(endpoint)
+        return filtered_endpoints
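+
+        # Matching sketch: an endpoint group whose filters are, say,
+        #   {'interface': 'admin', 'service_id': 's1'}   # hypothetical
+        # keeps only endpoints for which every filter key/value pair
+        # matches exactly; a single mismatch disqualifies the endpoint.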
+
+
+class EndpointFilterV3Controller(_ControllerBase):
+
+    def __init__(self):
+        super(EndpointFilterV3Controller, self).__init__()
+        notifications.register_event_callback(
+            notifications.ACTIONS.deleted, 'project',
+            self._on_project_or_endpoint_delete)
+        notifications.register_event_callback(
+            notifications.ACTIONS.deleted, 'endpoint',
+            self._on_project_or_endpoint_delete)
+
+    def _on_project_or_endpoint_delete(self, service, resource_type,
+                                       operation, payload):
+        project_or_endpoint_id = payload['resource_info']
+        if resource_type == 'project':
+            self.endpoint_filter_api.delete_association_by_project(
+                project_or_endpoint_id)
+        else:
+            self.endpoint_filter_api.delete_association_by_endpoint(
+                project_or_endpoint_id)
+
+    @controller.protected()
+    def add_endpoint_to_project(self, context, project_id, endpoint_id):
+        """Establishes an association between an endpoint and a project."""
+        # NOTE(gyee): we just need to make sure the endpoint and project
+        # exist first. We don't really care whether the project is disabled;
+        # the relationship can still be established even with a disabled
+        # project, as there are no security implications.
+        self.catalog_api.get_endpoint(endpoint_id)
+        self.resource_api.get_project(project_id)
+        self.endpoint_filter_api.add_endpoint_to_project(endpoint_id,
+                                                         project_id)
+
+    @controller.protected()
+    def check_endpoint_in_project(self, context, project_id, endpoint_id):
+        """Verifies endpoint is currently associated with given project."""
+        self.catalog_api.get_endpoint(endpoint_id)
+        self.resource_api.get_project(project_id)
+        self.endpoint_filter_api.check_endpoint_in_project(endpoint_id,
+                                                           project_id)
+
+    @controller.protected()
+    def list_endpoints_for_project(self, context, project_id):
+        """List all endpoints currently associated with a given project."""
+        self.resource_api.get_project(project_id)
+        refs = self.endpoint_filter_api.list_endpoints_for_project(project_id)
+        filtered_endpoints = {ref['endpoint_id']:
+                              self.catalog_api.get_endpoint(ref['endpoint_id'])
+                              for ref in refs}
+
+        # need to recover endpoint_groups associated with project
+        # then for each endpoint group return the endpoints.
+        endpoint_groups = self._get_endpoint_groups_for_project(project_id)
+        for endpoint_group in endpoint_groups:
+            endpoint_refs = self._get_endpoints_filtered_by_endpoint_group(
+                endpoint_group['id'])
+            # now check if any endpoints for current endpoint group are not
+            # contained in the list of filtered endpoints
+            for endpoint_ref in endpoint_refs:
+                if endpoint_ref['id'] not in filtered_endpoints:
+                    filtered_endpoints[endpoint_ref['id']] = endpoint_ref
+
+        return catalog_controllers.EndpointV3.wrap_collection(
+            context, [v for v in six.itervalues(filtered_endpoints)])
+
+    @controller.protected()
+    def remove_endpoint_from_project(self, context, project_id, endpoint_id):
+        """Remove the endpoint from the association with given project."""
+        self.endpoint_filter_api.remove_endpoint_from_project(endpoint_id,
+                                                              project_id)
+
+    @controller.protected()
+    def list_projects_for_endpoint(self, context, endpoint_id):
+        """Return a list of projects associated with the endpoint."""
+        self.catalog_api.get_endpoint(endpoint_id)
+        refs = self.endpoint_filter_api.list_projects_for_endpoint(endpoint_id)
+
+        projects = [self.resource_api.get_project(
+            ref['project_id']) for ref in refs]
+        return resource.controllers.ProjectV3.wrap_collection(context,
+                                                              projects)
+
+
+class EndpointGroupV3Controller(_ControllerBase):
+    collection_name = 'endpoint_groups'
+    member_name = 'endpoint_group'
+
+    VALID_FILTER_KEYS = ['service_id', 'region_id', 'interface']
+
+    def __init__(self):
+        super(EndpointGroupV3Controller, self).__init__()
+
+    @classmethod
+    def base_url(cls, context, path=None):
+        """Construct a path and pass it to V3Controller.base_url method."""
+
+        path = '/OS-EP-FILTER/' + cls.collection_name
+        return super(EndpointGroupV3Controller, cls).base_url(context,
+                                                              path=path)
+
+    @controller.protected()
+    @validation.validated(schema.endpoint_group_create, 'endpoint_group')
+    def create_endpoint_group(self, context, endpoint_group):
+        """Creates an Endpoint Group with the associated filters."""
+        ref = self._assign_unique_id(self._normalize_dict(endpoint_group))
+        self._require_attribute(ref, 'filters')
+        self._require_valid_filter(ref)
+        ref = self.endpoint_filter_api.create_endpoint_group(ref['id'], ref)
+        return EndpointGroupV3Controller.wrap_member(context, ref)
+
+    def _require_valid_filter(self, endpoint_group):
+        filters = endpoint_group.get('filters')
+        for key in six.iterkeys(filters):
+            if key not in self.VALID_FILTER_KEYS:
+                raise exception.ValidationError(
+                    attribute=self._valid_filter_keys(),
+                    target='endpoint_group')
+
+    def _valid_filter_keys(self):
+        return ' or '.join(self.VALID_FILTER_KEYS)
+
+    @controller.protected()
+    def get_endpoint_group(self, context, endpoint_group_id):
+        """Retrieve the endpoint group associated with the id if exists."""
+        ref = self.endpoint_filter_api.get_endpoint_group(endpoint_group_id)
+        return EndpointGroupV3Controller.wrap_member(
+            context, ref)
+
+    @controller.protected()
+    @validation.validated(schema.endpoint_group_update, 'endpoint_group')
+    def update_endpoint_group(self, context, endpoint_group_id,
+                              endpoint_group):
+        """Update fixed values and/or extend the filters."""
+        if 'filters' in endpoint_group:
+            self._require_valid_filter(endpoint_group)
+        ref = self.endpoint_filter_api.update_endpoint_group(endpoint_group_id,
+                                                             endpoint_group)
+        return EndpointGroupV3Controller.wrap_member(
+            context, ref)
+
+    @controller.protected()
+    def delete_endpoint_group(self, context, endpoint_group_id):
+        """Delete endpoint_group."""
+        self.endpoint_filter_api.delete_endpoint_group(endpoint_group_id)
+
+    @controller.protected()
+    def list_endpoint_groups(self, context):
+        """List all endpoint groups."""
+        refs = self.endpoint_filter_api.list_endpoint_groups()
+        return EndpointGroupV3Controller.wrap_collection(
+            context, refs)
+
+    @controller.protected()
+    def list_endpoint_groups_for_project(self, context, project_id):
+        """List all endpoint groups associated with a given project."""
+        return EndpointGroupV3Controller.wrap_collection(
+            context, self._get_endpoint_groups_for_project(project_id))
+
+    @controller.protected()
+    def list_projects_associated_with_endpoint_group(self,
+                                                     context,
+                                                     endpoint_group_id):
+        """List all projects associated with endpoint group."""
+        endpoint_group_refs = (self.endpoint_filter_api.
+                               list_projects_associated_with_endpoint_group(
+                                   endpoint_group_id))
+        projects = []
+        for endpoint_group_ref in endpoint_group_refs:
+            project = self.resource_api.get_project(
+                endpoint_group_ref['project_id'])
+            if project:
+                projects.append(project)
+        return resource.controllers.ProjectV3.wrap_collection(context,
+                                                              projects)
+
+    @controller.protected()
+    def list_endpoints_associated_with_endpoint_group(self,
+                                                      context,
+                                                      endpoint_group_id):
+        """List all the endpoints filtered by a specific endpoint group."""
+        filtered_endpoints = self._get_endpoints_filtered_by_endpoint_group(
+            endpoint_group_id)
+        return catalog_controllers.EndpointV3.wrap_collection(
+            context, filtered_endpoints)
+
+
+class ProjectEndpointGroupV3Controller(_ControllerBase):
+    collection_name = 'project_endpoint_groups'
+    member_name = 'project_endpoint_group'
+
+    def __init__(self):
+        super(ProjectEndpointGroupV3Controller, self).__init__()
+        notifications.register_event_callback(
+            notifications.ACTIONS.deleted, 'project',
+            self._on_project_delete)
+
+    def _on_project_delete(self, service, resource_type,
+                           operation, payload):
+        project_id = payload['resource_info']
+        (self.endpoint_filter_api.
+            delete_endpoint_group_association_by_project(
+                project_id))
+
+    @controller.protected()
+    def get_endpoint_group_in_project(self, context, endpoint_group_id,
+                                      project_id):
+        """Retrieve the endpoint group associated with the id if exists."""
+        self.resource_api.get_project(project_id)
+        self.endpoint_filter_api.get_endpoint_group(endpoint_group_id)
+        ref = self.endpoint_filter_api.get_endpoint_group_in_project(
+            endpoint_group_id, project_id)
+        return ProjectEndpointGroupV3Controller.wrap_member(
+            context, ref)
+
+    @controller.protected()
+    def add_endpoint_group_to_project(self, context, endpoint_group_id,
+                                      project_id):
+        """Creates an association between an endpoint group and project."""
+        self.resource_api.get_project(project_id)
+        self.endpoint_filter_api.get_endpoint_group(endpoint_group_id)
+        self.endpoint_filter_api.add_endpoint_group_to_project(
+            endpoint_group_id, project_id)
+
+    @controller.protected()
+    def remove_endpoint_group_from_project(self, context, endpoint_group_id,
+                                           project_id):
+        """Remove the endpoint group from associated project."""
+        self.resource_api.get_project(project_id)
+        self.endpoint_filter_api.get_endpoint_group(endpoint_group_id)
+        self.endpoint_filter_api.remove_endpoint_group_from_project(
+            endpoint_group_id, project_id)
+
+    @classmethod
+    def _add_self_referential_link(cls, context, ref):
+        url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
+               '/projects/%(project_id)s' % {
+                   'endpoint_group_id': ref['endpoint_group_id'],
+                   'project_id': ref['project_id']})
+        ref.setdefault('links', {})
+        ref['links']['self'] = url
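+
+# The self link produced above resolves to a URL of the form (IDs
+# hypothetical):
+#   /OS-EP-FILTER/endpoint_groups/eg1/projects/p1
+# so each endpoint-group-to-project association is itself addressable by
+# the get/add/remove operations defined in this controller.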
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/core.py b/keystone-moon/keystone/contrib/endpoint_filter/core.py
new file mode 100644 (file)
index 0000000..972b65d
--- /dev/null
@@ -0,0 +1,289 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import manager
+from keystone import exception
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+extension_data = {
+    'name': 'OpenStack Keystone Endpoint Filter API',
+    'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                 'OS-EP-FILTER/v1.0',
+    'alias': 'OS-EP-FILTER',
+    'updated': '2013-07-23T12:00:00-00:00',
+    'description': 'OpenStack Keystone Endpoint Filter API.',
+    'links': [
+        {
+            'rel': 'describedby',
+            # TODO(ayoung): needs a description
+            'type': 'text/html',
+            'href': 'https://github.com/openstack/identity-api/blob/master'
+                    '/openstack-identity-api/v3/src/markdown/'
+                    'identity-api-v3-os-ep-filter-ext.md',
+        }
+    ]}
+extension.register_admin_extension(extension_data['alias'], extension_data)
+
+
+@dependency.provider('endpoint_filter_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Endpoint Filter backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.endpoint_filter.driver)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    """Interface description for an Endpoint Filter driver."""
+
+    @abc.abstractmethod
+    def add_endpoint_to_project(self, endpoint_id, project_id):
+        """Create an endpoint to project association.
+
+        :param endpoint_id: identity of endpoint to associate
+        :type endpoint_id: string
+        :param project_id: identity of the project to be associated with
+        :type project_id: string
+        :raises: keystone.exception.Conflict
+        :returns: None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def remove_endpoint_from_project(self, endpoint_id, project_id):
+        """Removes an endpoint to project association.
+
+        :param endpoint_id: identity of endpoint to remove
+        :type endpoint_id: string
+        :param project_id: identity of the project associated with
+        :type project_id: string
+        :raises: exception.NotFound
+        :returns: None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def check_endpoint_in_project(self, endpoint_id, project_id):
+        """Checks if an endpoint is associated with a project.
+
+        :param endpoint_id: identity of endpoint to check
+        :type endpoint_id: string
+        :param project_id: identity of the project associated with
+        :type project_id: string
+        :raises: exception.NotFound
+        :returns: None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_endpoints_for_project(self, project_id):
+        """List all endpoints associated with a project.
+
+        :param project_id: identity of the project to check
+        :type project_id: string
+        :returns: a list of identity endpoint ids or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_projects_for_endpoint(self, endpoint_id):
+        """List all projects associated with an endpoint.
+
+        :param endpoint_id: identity of endpoint to check
+        :type endpoint_id: string
+        :returns: a list of projects or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_association_by_endpoint(self, endpoint_id):
+        """Removes all the endpoints to project association with endpoint.
+
+        :param endpoint_id: identity of endpoint to check
+        :type endpoint_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()
+
+    @abc.abstractmethod
+    def delete_association_by_project(self, project_id):
+        """Removes all the endpoints to project association with project.
+
+        :param project_id: identity of the project to check
+        :type project_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()
+
+    @abc.abstractmethod
+    def create_endpoint_group(self, endpoint_group_id, endpoint_group):
+        """Create an endpoint group.
+
+        :param endpoint_group_id: identity of the endpoint group to create
+        :type endpoint_group_id: string
+        :param endpoint_group: endpoint group to create
+        :type endpoint_group: dictionary
+        :raises: keystone.exception.Conflict
+        :returns: an endpoint group representation.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_endpoint_group(self, endpoint_group_id):
+        """Get an endpoint group.
+
+        :param endpoint_group_id: identity of endpoint group to retrieve
+        :type endpoint_group_id: string
+        :raises: exception.NotFound
+        :returns: an endpoint group representation.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_endpoint_group(self, endpoint_group_id, endpoint_group):
+        """Update an endpoint group.
+
+        :param endpoint_group_id: identity of endpoint group to retrieve
+        :type endpoint_group_id: string
+        :param endpoint_group: A full or partial endpoint_group
+        :type endpoint_group: dictionary
+        :raises: exception.NotFound
+        :returns: an endpoint group representation.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_endpoint_group(self, endpoint_group_id):
+        """Delete an endpoint group.
+
+        :param endpoint_group_id: identity of endpoint group to delete
+        :type endpoint_group_id: string
+        :raises: exception.NotFound
+        :returns: None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
+        """Adds an endpoint group to project association.
+
+        :param endpoint_group_id: identity of the endpoint group to associate
+        :type endpoint_group_id: string
+        :param project_id: identity of project to associate
+        :type project_id: string
+        :raises: keystone.exception.Conflict
+        :returns: None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
+        """Get endpoint group to project association.
+
+        :param endpoint_group_id: identity of endpoint group to retrieve
+        :type endpoint_group_id: string
+        :param project_id: identity of project to associate
+        :type project_id: string
+        :raises: exception.NotFound
+        :returns: a project endpoint group representation.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_endpoint_groups(self):
+        """List all endpoint groups.
+
+        :raises: exception.NotFound
+        :returns: a list of endpoint group representations.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_endpoint_groups_for_project(self, project_id):
+        """List all endpoint group to project associations for a project.
+
+        :param project_id: identity of project to associate
+        :type project_id: string
+        :raises: exception.NotFound
+        :returns: a list of endpoint group to project associations.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_projects_associated_with_endpoint_group(self, endpoint_group_id):
+        """List all projects associated with endpoint group.
+
+        :param endpoint_group_id: identity of the endpoint group
+        :type endpoint_group_id: string
+        :raises: exception.NotFound
+        :returns: a list of project to endpoint group associations.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def remove_endpoint_group_from_project(self, endpoint_group_id,
+                                           project_id):
+        """Remove an endpoint to project association.
+
+        :param endpoint_group_id: identity of endpoint to associate
+        :type endpoint_group_id: string
+        :param project_id: identity of project to associate
+        :type project_id: string
+        :raises: exception.NotFound
+        :returns: None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_endpoint_group_association_by_project(self, project_id):
+        """Remove endpoint group to project associations.
+
+        :param project_id: identity of the project to check
+        :type project_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
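+
+# A custom backend only needs to subclass Driver and implement the
+# abstract methods above; a minimal, non-persistent sketch (illustrative
+# only, names hypothetical):
+#
+#   class InMemoryEndpointFilter(Driver):
+#       def __init__(self):
+#           self._assocs = set()
+#
+#       def add_endpoint_to_project(self, endpoint_id, project_id):
+#           self._assocs.add((endpoint_id, project_id))
+#
+#       # ...remaining abstract methods implemented similarly
+#
+# The active backend is selected through the [endpoint_filter] driver
+# option consumed by Manager.__init__ above.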
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/__init__.py b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/migrate.cfg b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/migrate.cfg
new file mode 100644 (file)
index 0000000..c7d3478
--- /dev/null
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=endpoint_filter
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
new file mode 100644 (file)
index 0000000..090e7f4
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine; bind
+    # migrate_engine to your metadata
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    endpoint_filtering_table = sql.Table(
+        'project_endpoint',
+        meta,
+        sql.Column(
+            'endpoint_id',
+            sql.String(64),
+            primary_key=True,
+            nullable=False),
+        sql.Column(
+            'project_id',
+            sql.String(64),
+            primary_key=True,
+            nullable=False))
+
+    endpoint_filtering_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    # Operations to reverse the above upgrade go here.
+    for table_name in ['project_endpoint']:
+        table = sql.Table(table_name, meta, autoload=True)
+        table.drop()
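The upgrade defines a pure association table whose composite primary key over (endpoint_id, project_id) doubles as its uniqueness guarantee, and ``checkfirst=True`` makes re-running the script harmless. A minimal standalone sketch of the same pattern, using plain SQLAlchemy against a throwaway in-memory SQLite engine (illustrative, not keystone code)::

    import sqlalchemy as sql

    meta = sql.MetaData()
    project_endpoint = sql.Table(
        'project_endpoint', meta,
        sql.Column('endpoint_id', sql.String(64), primary_key=True),
        sql.Column('project_id', sql.String(64), primary_key=True))

    engine = sql.create_engine('sqlite://')           # in-memory database
    project_endpoint.create(engine, checkfirst=True)
    project_endpoint.create(engine, checkfirst=True)  # no-op: already exists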
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py
new file mode 100644 (file)
index 0000000..5f80160
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright 2014 Hewlett-Packard Company
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine; bind
+    # migrate_engine to your metadata
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    endpoint_group_table = sql.Table(
+        'endpoint_group',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('name', sql.String(255), nullable=False),
+        sql.Column('description', sql.Text, nullable=True),
+        sql.Column('filters', sql.Text(), nullable=False))
+    endpoint_group_table.create(migrate_engine, checkfirst=True)
+
+    project_endpoint_group_table = sql.Table(
+        'project_endpoint_group',
+        meta,
+        sql.Column('endpoint_group_id', sql.String(64),
+                   sql.ForeignKey('endpoint_group.id'), nullable=False),
+        sql.Column('project_id', sql.String(64), nullable=False),
+        sql.PrimaryKeyConstraint('endpoint_group_id',
+                                 'project_id'))
+    project_endpoint_group_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    # Operations to reverse the above upgrade go here.
+    for table_name in ['project_endpoint_group',
+                       'endpoint_group']:
+        table = sql.Table(table_name, meta, autoload=True)
+        table.drop()
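Note that the downgrade drops ``project_endpoint_group`` before ``endpoint_group``: the child table carries a foreign key to the parent, so reversing the order would fail on engines that enforce referential integrity. A standalone sketch of the dependency (illustrative only)::

    import sqlalchemy as sql

    meta = sql.MetaData()
    parent = sql.Table(
        'endpoint_group', meta,
        sql.Column('id', sql.String(64), primary_key=True))
    child = sql.Table(
        'project_endpoint_group', meta,
        sql.Column('endpoint_group_id', sql.String(64),
                   sql.ForeignKey('endpoint_group.id'), nullable=False),
        sql.Column('project_id', sql.String(64), nullable=False),
        sql.PrimaryKeyConstraint('endpoint_group_id', 'project_id'))

    engine = sql.create_engine('sqlite://')
    meta.create_all(engine)   # parent is created first, then child
    child.drop(engine)        # the referencing table must go first
    parent.drop(engine)       # now the parent can be dropped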
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/__init__.py b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/routers.py b/keystone-moon/keystone/contrib/endpoint_filter/routers.py
new file mode 100644 (file)
index 0000000..00c8cd7
--- /dev/null
@@ -0,0 +1,149 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.contrib.endpoint_filter import controllers
+
+
+build_resource_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-EP-FILTER', extension_version='1.0')
+
+build_parameter_relation = functools.partial(
+    json_home.build_v3_extension_parameter_relation,
+    extension_name='OS-EP-FILTER', extension_version='1.0')
+
+ENDPOINT_GROUP_PARAMETER_RELATION = build_parameter_relation(
+    parameter_name='endpoint_group_id')
+
+
+class EndpointFilterExtension(wsgi.V3ExtensionRouter):
+    """API Endpoints for the Endpoint Filter extension.
+
+    The API looks like::
+
+        PUT /OS-EP-FILTER/projects/$project_id/endpoints/$endpoint_id
+        GET /OS-EP-FILTER/projects/$project_id/endpoints/$endpoint_id
+        HEAD /OS-EP-FILTER/projects/$project_id/endpoints/$endpoint_id
+        DELETE /OS-EP-FILTER/projects/$project_id/endpoints/$endpoint_id
+        GET /OS-EP-FILTER/endpoints/$endpoint_id/projects
+        GET /OS-EP-FILTER/projects/$project_id/endpoints
+
+        GET /OS-EP-FILTER/endpoint_groups
+        POST /OS-EP-FILTER/endpoint_groups
+        GET /OS-EP-FILTER/endpoint_groups/$endpoint_group_id
+        HEAD /OS-EP-FILTER/endpoint_groups/$endpoint_group_id
+        PATCH /OS-EP-FILTER/endpoint_groups/$endpoint_group_id
+        DELETE /OS-EP-FILTER/endpoint_groups/$endpoint_group_id
+
+        GET /OS-EP-FILTER/endpoint_groups/$endpoint_group_id/projects
+        GET /OS-EP-FILTER/endpoint_groups/$endpoint_group_id/endpoints
+
+        PUT /OS-EP-FILTER/endpoint_groups/$endpoint_group/projects/$project_id
+        GET /OS-EP-FILTER/endpoint_groups/$endpoint_group/projects/$project_id
+        HEAD /OS-EP-FILTER/endpoint_groups/$endpoint_group/projects/$project_id
+        DELETE /OS-EP-FILTER/endpoint_groups/$endpoint_group/projects/
+            $project_id
+
+    """
+    PATH_PREFIX = '/OS-EP-FILTER'
+    PATH_PROJECT_ENDPOINT = '/projects/{project_id}/endpoints/{endpoint_id}'
+    PATH_ENDPOINT_GROUPS = '/endpoint_groups/{endpoint_group_id}'
+    PATH_ENDPOINT_GROUP_PROJECTS = PATH_ENDPOINT_GROUPS + (
+        '/projects/{project_id}')
+
+    def add_routes(self, mapper):
+        endpoint_filter_controller = controllers.EndpointFilterV3Controller()
+        endpoint_group_controller = controllers.EndpointGroupV3Controller()
+        project_endpoint_group_controller = (
+            controllers.ProjectEndpointGroupV3Controller())
+
+        self._add_resource(
+            mapper, endpoint_filter_controller,
+            path=self.PATH_PREFIX + '/endpoints/{endpoint_id}/projects',
+            get_action='list_projects_for_endpoint',
+            rel=build_resource_relation(resource_name='endpoint_projects'),
+            path_vars={
+                'endpoint_id': json_home.Parameters.ENDPOINT_ID,
+            })
+        self._add_resource(
+            mapper, endpoint_filter_controller,
+            path=self.PATH_PREFIX + self.PATH_PROJECT_ENDPOINT,
+            get_head_action='check_endpoint_in_project',
+            put_action='add_endpoint_to_project',
+            delete_action='remove_endpoint_from_project',
+            rel=build_resource_relation(resource_name='project_endpoint'),
+            path_vars={
+                'endpoint_id': json_home.Parameters.ENDPOINT_ID,
+                'project_id': json_home.Parameters.PROJECT_ID,
+            })
+        self._add_resource(
+            mapper, endpoint_filter_controller,
+            path=self.PATH_PREFIX + '/projects/{project_id}/endpoints',
+            get_action='list_endpoints_for_project',
+            rel=build_resource_relation(resource_name='project_endpoints'),
+            path_vars={
+                'project_id': json_home.Parameters.PROJECT_ID,
+            })
+        self._add_resource(
+            mapper, endpoint_group_controller,
+            path=self.PATH_PREFIX + '/endpoint_groups',
+            get_action='list_endpoint_groups',
+            post_action='create_endpoint_group',
+            rel=build_resource_relation(resource_name='endpoint_groups'))
+        self._add_resource(
+            mapper, endpoint_group_controller,
+            path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS,
+            get_head_action='get_endpoint_group',
+            patch_action='update_endpoint_group',
+            delete_action='delete_endpoint_group',
+            rel=build_resource_relation(resource_name='endpoint_group'),
+            path_vars={
+                'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
+            })
+        self._add_resource(
+            mapper, project_endpoint_group_controller,
+            path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUP_PROJECTS,
+            get_head_action='get_endpoint_group_in_project',
+            put_action='add_endpoint_group_to_project',
+            delete_action='remove_endpoint_group_from_project',
+            rel=build_resource_relation(
+                resource_name='endpoint_group_to_project_association'),
+            path_vars={
+                'project_id': json_home.Parameters.PROJECT_ID,
+                'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
+            })
+        self._add_resource(
+            mapper, endpoint_group_controller,
+            path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + (
+                '/projects'),
+            get_action='list_projects_associated_with_endpoint_group',
+            rel=build_resource_relation(
+                resource_name='projects_associated_with_endpoint_group'),
+            path_vars={
+                'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
+            })
+        self._add_resource(
+            mapper, endpoint_group_controller,
+            path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + (
+                '/endpoints'),
+            get_action='list_endpoints_associated_with_endpoint_group',
+            rel=build_resource_relation(
+                resource_name='endpoints_in_endpoint_group'),
+            path_vars={
+                'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
+            })
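Assuming keystone is running with this extension in its paste pipeline, the routes above can be exercised over HTTP. A hedged sketch with the ``requests`` library; the service URL, token, and the PROJECT_ID/ENDPOINT_ID placeholders are assumptions, not values from this change::

    import requests

    KEYSTONE = 'http://localhost:35357/v3'        # assumed admin endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}     # placeholder token

    # Associate an endpoint with a project, verify it, then list results.
    url = (KEYSTONE +
           '/OS-EP-FILTER/projects/PROJECT_ID/endpoints/ENDPOINT_ID')
    requests.put(url, headers=HEADERS)            # add_endpoint_to_project
    requests.head(url, headers=HEADERS)           # check_endpoint_in_project
    r = requests.get(
        KEYSTONE + '/OS-EP-FILTER/projects/PROJECT_ID/endpoints',
        headers=HEADERS)                          # list_endpoints_for_project
    print(r.json())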
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/schema.py b/keystone-moon/keystone/contrib/endpoint_filter/schema.py
new file mode 100644 (file)
index 0000000..cbe54e3
--- /dev/null
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+
+
+_endpoint_group_properties = {
+    'description': validation.nullable(parameter_types.description),
+    'filters': {
+        'type': 'object'
+    },
+    'name': parameter_types.name
+}
+
+endpoint_group_create = {
+    'type': 'object',
+    'properties': _endpoint_group_properties,
+    'required': ['name', 'filters']
+}
+
+endpoint_group_update = {
+    'type': 'object',
+    'properties': _endpoint_group_properties,
+    'minProperties': 1
+}
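A rough standalone illustration of what these rules enforce, using the ``jsonschema`` library directly with simplified stand-ins for keystone's ``parameter_types`` (the real validators add constraints such as length limits)::

    import jsonschema

    endpoint_group_create = {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},                   # simplified
            'description': {'type': ['string', 'null']},  # nullable
            'filters': {'type': 'object'},
        },
        'required': ['name', 'filters'],
    }

    # A well-formed create request passes validation.
    jsonschema.validate(
        {'name': 'computenorth', 'filters': {'region_id': 'north'}},
        endpoint_group_create)

    # A request missing 'filters' is rejected.
    try:
        jsonschema.validate({'name': 'no-filters'}, endpoint_group_create)
    except jsonschema.ValidationError as e:
        print(e.message)      # 'filters' is a required property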
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/__init__.py b/keystone-moon/keystone/contrib/endpoint_policy/__init__.py
new file mode 100644 (file)
index 0000000..12722dc
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.endpoint_policy.core import *  # noqa
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/backends/__init__.py b/keystone-moon/keystone/contrib/endpoint_policy/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py b/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py
new file mode 100644 (file)
index 0000000..484444f
--- /dev/null
@@ -0,0 +1,140 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import sqlalchemy
+
+from keystone.common import sql
+from keystone import exception
+
+
+class PolicyAssociation(sql.ModelBase, sql.ModelDictMixin):
+    __tablename__ = 'policy_association'
+    attributes = ['policy_id', 'endpoint_id', 'region_id', 'service_id']
+    # The id column is never exposed outside this module. It only exists to
+    # provide a primary key, given that the real columns we would like to use
+    # (endpoint_id, service_id, region_id) can be null
+    id = sql.Column(sql.String(64), primary_key=True)
+    policy_id = sql.Column(sql.String(64), nullable=False)
+    endpoint_id = sql.Column(sql.String(64), nullable=True)
+    service_id = sql.Column(sql.String(64), nullable=True)
+    region_id = sql.Column(sql.String(64), nullable=True)
+    __table_args__ = (sql.UniqueConstraint('endpoint_id', 'service_id',
+                                           'region_id'), {})
+
+    def to_dict(self):
+        """Returns the model's attributes as a dictionary.
+
+        We override the standard method in order to hide the id column,
+        since this only exists to provide the table with a primary key.
+
+        """
+        d = {}
+        for attr in self.__class__.attributes:
+            d[attr] = getattr(self, attr)
+        return d
+
+
+class EndpointPolicy(object):
+
+    def create_policy_association(self, policy_id, endpoint_id=None,
+                                  service_id=None, region_id=None):
+        with sql.transaction() as session:
+            try:
+                # See if there is already a row for this association, and if
+                # so, update it with the new policy_id
+                query = session.query(PolicyAssociation)
+                query = query.filter_by(endpoint_id=endpoint_id)
+                query = query.filter_by(service_id=service_id)
+                query = query.filter_by(region_id=region_id)
+                association = query.one()
+                association.policy_id = policy_id
+            except sql.NotFound:
+                association = PolicyAssociation(id=uuid.uuid4().hex,
+                                                policy_id=policy_id,
+                                                endpoint_id=endpoint_id,
+                                                service_id=service_id,
+                                                region_id=region_id)
+                session.add(association)
+
+    def check_policy_association(self, policy_id, endpoint_id=None,
+                                 service_id=None, region_id=None):
+        sql_constraints = sqlalchemy.and_(
+            PolicyAssociation.policy_id == policy_id,
+            PolicyAssociation.endpoint_id == endpoint_id,
+            PolicyAssociation.service_id == service_id,
+            PolicyAssociation.region_id == region_id)
+
+        # NOTE(henry-nash): Getting a single value to save object
+        # management overhead.
+        with sql.transaction() as session:
+            if session.query(PolicyAssociation.id).filter(
+                    sql_constraints).distinct().count() == 0:
+                raise exception.PolicyAssociationNotFound()
+
+    def delete_policy_association(self, policy_id, endpoint_id=None,
+                                  service_id=None, region_id=None):
+        with sql.transaction() as session:
+            query = session.query(PolicyAssociation)
+            query = query.filter_by(policy_id=policy_id)
+            query = query.filter_by(endpoint_id=endpoint_id)
+            query = query.filter_by(service_id=service_id)
+            query = query.filter_by(region_id=region_id)
+            query.delete()
+
+    def get_policy_association(self, endpoint_id=None,
+                               service_id=None, region_id=None):
+        sql_constraints = sqlalchemy.and_(
+            PolicyAssociation.endpoint_id == endpoint_id,
+            PolicyAssociation.service_id == service_id,
+            PolicyAssociation.region_id == region_id)
+
+        try:
+            with sql.transaction() as session:
+                policy_id = session.query(PolicyAssociation.policy_id).filter(
+                    sql_constraints).distinct().one()[0]
+            return {'policy_id': policy_id}
+        except sql.NotFound:
+            raise exception.PolicyAssociationNotFound()
+
+    def list_associations_for_policy(self, policy_id):
+        with sql.transaction() as session:
+            query = session.query(PolicyAssociation)
+            query = query.filter_by(policy_id=policy_id)
+            return [ref.to_dict() for ref in query.all()]
+
+    def delete_association_by_endpoint(self, endpoint_id):
+        with sql.transaction() as session:
+            query = session.query(PolicyAssociation)
+            query = query.filter_by(endpoint_id=endpoint_id)
+            query.delete()
+
+    def delete_association_by_service(self, service_id):
+        with sql.transaction() as session:
+            query = session.query(PolicyAssociation)
+            query = query.filter_by(service_id=service_id)
+            query.delete()
+
+    def delete_association_by_region(self, region_id):
+        with sql.transaction() as session:
+            query = session.query(PolicyAssociation)
+            query = query.filter_by(region_id=region_id)
+            query.delete()
+
+    def delete_association_by_policy(self, policy_id):
+        with sql.transaction() as session:
+            query = session.query(PolicyAssociation)
+            query = query.filter_by(policy_id=policy_id)
+            query.delete()
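``create_policy_association`` is a query-then-update-or-insert: it looks for the unique (endpoint, service, region) row and repoints its ``policy_id``, falling back to inserting a fresh row keyed by a random UUID. A stripped-down sketch of the pattern in plain SQLAlchemy (keystone's ``sql`` module wraps the same session machinery; names here are illustrative)::

    import uuid

    from sqlalchemy.orm.exc import NoResultFound

    def upsert_association(session, model, policy_id, **keys):
        try:
            ref = session.query(model).filter_by(**keys).one()
            ref.policy_id = policy_id     # repoint the existing row
        except NoResultFound:
            session.add(model(id=uuid.uuid4().hex,
                              policy_id=policy_id, **keys))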
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/controllers.py b/keystone-moon/keystone/contrib/endpoint_policy/controllers.py
new file mode 100644 (file)
index 0000000..b96834d
--- /dev/null
@@ -0,0 +1,166 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone import notifications
+
+
+@dependency.requires('policy_api', 'catalog_api', 'endpoint_policy_api')
+class EndpointPolicyV3Controller(controller.V3Controller):
+    collection_name = 'endpoints'
+    member_name = 'endpoint'
+
+    def __init__(self):
+        super(EndpointPolicyV3Controller, self).__init__()
+        notifications.register_event_callback(
+            'deleted', 'endpoint', self._on_endpoint_delete)
+        notifications.register_event_callback(
+            'deleted', 'service', self._on_service_delete)
+        notifications.register_event_callback(
+            'deleted', 'region', self._on_region_delete)
+        notifications.register_event_callback(
+            'deleted', 'policy', self._on_policy_delete)
+
+    def _on_endpoint_delete(self, service, resource_type, operation, payload):
+        self.endpoint_policy_api.delete_association_by_endpoint(
+            payload['resource_info'])
+
+    def _on_service_delete(self, service, resource_type, operation, payload):
+        self.endpoint_policy_api.delete_association_by_service(
+            payload['resource_info'])
+
+    def _on_region_delete(self, service, resource_type, operation, payload):
+        self.endpoint_policy_api.delete_association_by_region(
+            payload['resource_info'])
+
+    def _on_policy_delete(self, service, resource_type, operation, payload):
+        self.endpoint_policy_api.delete_association_by_policy(
+            payload['resource_info'])
+
+    @controller.protected()
+    def create_policy_association_for_endpoint(self, context,
+                                               policy_id, endpoint_id):
+        """Create an association between a policy and an endpoint."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_endpoint(endpoint_id)
+        self.endpoint_policy_api.create_policy_association(
+            policy_id, endpoint_id=endpoint_id)
+
+    @controller.protected()
+    def check_policy_association_for_endpoint(self, context,
+                                              policy_id, endpoint_id):
+        """Check an association between a policy and an endpoint."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_endpoint(endpoint_id)
+        self.endpoint_policy_api.check_policy_association(
+            policy_id, endpoint_id=endpoint_id)
+
+    @controller.protected()
+    def delete_policy_association_for_endpoint(self, context,
+                                               policy_id, endpoint_id):
+        """Delete an association between a policy and an endpoint."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_endpoint(endpoint_id)
+        self.endpoint_policy_api.delete_policy_association(
+            policy_id, endpoint_id=endpoint_id)
+
+    @controller.protected()
+    def create_policy_association_for_service(self, context,
+                                              policy_id, service_id):
+        """Create an association between a policy and a service."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_service(service_id)
+        self.endpoint_policy_api.create_policy_association(
+            policy_id, service_id=service_id)
+
+    @controller.protected()
+    def check_policy_association_for_service(self, context,
+                                             policy_id, service_id):
+        """Check an association between a policy and a service."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_service(service_id)
+        self.endpoint_policy_api.check_policy_association(
+            policy_id, service_id=service_id)
+
+    @controller.protected()
+    def delete_policy_association_for_service(self, context,
+                                              policy_id, service_id):
+        """Delete an association between a policy and a service."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_service(service_id)
+        self.endpoint_policy_api.delete_policy_association(
+            policy_id, service_id=service_id)
+
+    @controller.protected()
+    def create_policy_association_for_region_and_service(
+            self, context, policy_id, service_id, region_id):
+        """Create an association between a policy and region+service."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_service(service_id)
+        self.catalog_api.get_region(region_id)
+        self.endpoint_policy_api.create_policy_association(
+            policy_id, service_id=service_id, region_id=region_id)
+
+    @controller.protected()
+    def check_policy_association_for_region_and_service(
+            self, context, policy_id, service_id, region_id):
+        """Check an association between a policy and region+service."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_service(service_id)
+        self.catalog_api.get_region(region_id)
+        self.endpoint_policy_api.check_policy_association(
+            policy_id, service_id=service_id, region_id=region_id)
+
+    @controller.protected()
+    def delete_policy_association_for_region_and_service(
+            self, context, policy_id, service_id, region_id):
+        """Delete an association between a policy and region+service."""
+        self.policy_api.get_policy(policy_id)
+        self.catalog_api.get_service(service_id)
+        self.catalog_api.get_region(region_id)
+        self.endpoint_policy_api.delete_policy_association(
+            policy_id, service_id=service_id, region_id=region_id)
+
+    @controller.protected()
+    def get_policy_for_endpoint(self, context, endpoint_id):
+        """Get the effective policy for an endpoint."""
+        self.catalog_api.get_endpoint(endpoint_id)
+        ref = self.endpoint_policy_api.get_policy_for_endpoint(endpoint_id)
+        # NOTE(henry-nash): since the collection and member for this class are
+        # set to endpoints, we have to handle wrapping this policy entity
+        # ourselves.
+        self._add_self_referential_link(context, ref)
+        return {'policy': ref}
+
+    # NOTE(henry-nash): As in the catalog controller, we must ensure that the
+    # legacy_endpoint_id does not escape.
+
+    @classmethod
+    def filter_endpoint(cls, ref):
+        if 'legacy_endpoint_id' in ref:
+            ref.pop('legacy_endpoint_id')
+        return ref
+
+    @classmethod
+    def wrap_member(cls, context, ref):
+        ref = cls.filter_endpoint(ref)
+        return super(EndpointPolicyV3Controller, cls).wrap_member(context, ref)
+
+    @controller.protected()
+    def list_endpoints_for_policy(self, context, policy_id):
+        """List endpoints with the effective association to a policy."""
+        self.policy_api.get_policy(policy_id)
+        refs = self.endpoint_policy_api.list_endpoints_for_policy(policy_id)
+        return EndpointPolicyV3Controller.wrap_collection(context, refs)
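The controller keeps associations consistent by subscribing to deletion events at construction time; each callback receives the deleted resource's id in ``payload['resource_info']``. A sketch of the same wiring for a hypothetical listener (the callback signature mirrors what this controller registers)::

    from keystone import notifications

    def on_endpoint_delete(service, resource_type, operation, payload):
        # payload['resource_info'] carries the id of the deleted endpoint.
        print('cleaning up after endpoint %s' % payload['resource_info'])

    notifications.register_event_callback(
        'deleted', 'endpoint', on_endpoint_delete)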
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/core.py b/keystone-moon/keystone/contrib/endpoint_policy/core.py
new file mode 100644 (file)
index 0000000..1aa0326
--- /dev/null
@@ -0,0 +1,430 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _, _LE, _LW
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+@dependency.provider('endpoint_policy_api')
+@dependency.requires('catalog_api', 'policy_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Endpoint Policy backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.endpoint_policy.driver)
+
+    def _assert_valid_association(self, endpoint_id, service_id, region_id):
+        """Assert that the association is supported.
+
+        There are three types of association supported:
+
+        - Endpoint (in which case service and region must be None)
+        - Service and region (in which case endpoint must be None)
+        - Service (in which case endpoint and region must be None)
+
+        """
+        if (endpoint_id is not None and
+                service_id is None and region_id is None):
+            return
+        if (service_id is not None and region_id is not None and
+                endpoint_id is None):
+            return
+        if (service_id is not None and
+                endpoint_id is None and region_id is None):
+            return
+
+        raise exception.InvalidPolicyAssociation(endpoint_id=endpoint_id,
+                                                 service_id=service_id,
+                                                 region_id=region_id)
+
+    def create_policy_association(self, policy_id, endpoint_id=None,
+                                  service_id=None, region_id=None):
+        self._assert_valid_association(endpoint_id, service_id, region_id)
+        self.driver.create_policy_association(policy_id, endpoint_id,
+                                              service_id, region_id)
+
+    def check_policy_association(self, policy_id, endpoint_id=None,
+                                 service_id=None, region_id=None):
+        self._assert_valid_association(endpoint_id, service_id, region_id)
+        self.driver.check_policy_association(policy_id, endpoint_id,
+                                             service_id, region_id)
+
+    def delete_policy_association(self, policy_id, endpoint_id=None,
+                                  service_id=None, region_id=None):
+        self._assert_valid_association(endpoint_id, service_id, region_id)
+        self.driver.delete_policy_association(policy_id, endpoint_id,
+                                              service_id, region_id)
+
+    def list_endpoints_for_policy(self, policy_id):
+
+        def _get_endpoint(endpoint_id, policy_id):
+            try:
+                return self.catalog_api.get_endpoint(endpoint_id)
+            except exception.EndpointNotFound:
+                msg = _LW('Endpoint %(endpoint_id)s referenced in '
+                          'association for policy %(policy_id)s not found.')
+                LOG.warning(msg, {'policy_id': policy_id,
+                                  'endpoint_id': endpoint_id})
+                raise
+
+        def _get_endpoints_for_service(service_id, endpoints):
+            # TODO(henry-nash): Consider optimizing this in the future by
+            # adding an explicit list_endpoints_for_service to the catalog API.
+            return [ep for ep in endpoints if ep['service_id'] == service_id]
+
+        def _get_endpoints_for_service_and_region(
+                service_id, region_id, endpoints, regions):
+            # TODO(henry-nash): Consider optimizing this in the future.
+            # The lack of a two-way pointer in the region tree structure
+            # makes this somewhat inefficient.
+
+            def _recursively_get_endpoints_for_region(
+                region_id, service_id, endpoint_list, region_list,
+                    endpoints_found, regions_examined):
+                """Recursively search down a region tree for endpoints.
+
+                :param region_id: the point in the tree to examine
+                :param service_id: the service we are interested in
+                :param endpoint_list: list of all endpoints
+                :param region_list: list of all regions
+                :param endpoints_found: list of matching endpoints found so
+                                        far - which will be updated if more are
+                                        found in this iteration
+                :param regions_examined: list of regions we have already looked
+                                         at - used to spot illegal circular
+                                         references in the tree to avoid never
+                                         completing search
+                :returns: list of endpoints that match
+
+                """
+
+                if region_id in regions_examined:
+                    msg = _LE('Circular reference or a repeated entry found '
+                              'in region tree - %(region_id)s.')
+                    LOG.error(msg, {'region_id': region_id})
+                    return
+
+                regions_examined.append(region_id)
+                endpoints_found += (
+                    [ep for ep in endpoint_list if
+                     ep['service_id'] == service_id and
+                     ep['region_id'] == region_id])
+
+                for region in region_list:
+                    if region['parent_region_id'] == region_id:
+                        _recursively_get_endpoints_for_region(
+                            region['id'], service_id, endpoints, regions,
+                            endpoints_found, regions_examined)
+
+            endpoints_found = []
+            regions_examined = []
+
+            # Now walk down the region tree
+            _recursively_get_endpoints_for_region(
+                region_id, service_id, endpoints, regions,
+                endpoints_found, regions_examined)
+
+            return endpoints_found
+
+        matching_endpoints = []
+        endpoints = self.catalog_api.list_endpoints()
+        regions = self.catalog_api.list_regions()
+        for ref in self.driver.list_associations_for_policy(policy_id):
+            if ref.get('endpoint_id') is not None:
+                matching_endpoints.append(
+                    _get_endpoint(ref['endpoint_id'], policy_id))
+                continue
+
+            if (ref.get('service_id') is not None and
+                    ref.get('region_id') is None):
+                matching_endpoints += _get_endpoints_for_service(
+                    ref['service_id'], endpoints)
+                continue
+
+            if (ref.get('service_id') is not None and
+                    ref.get('region_id') is not None):
+                matching_endpoints += (
+                    _get_endpoints_for_service_and_region(
+                        ref['service_id'], ref['region_id'],
+                        endpoints, regions))
+                continue
+
+            msg = _LW('Unsupported policy association found - '
+                      'Policy %(policy_id)s, Endpoint %(endpoint_id)s, '
+                      'Service %(service_id)s, Region %(region_id)s.')
+            LOG.warning(msg, {'policy_id': policy_id,
+                              'endpoint_id': ref['endpoint_id'],
+                              'service_id': ref['service_id'],
+                              'region_id': ref['region_id']})
+
+        return matching_endpoints
+
+    def get_policy_for_endpoint(self, endpoint_id):
+
+        def _get_policy(policy_id, endpoint_id):
+            try:
+                return self.policy_api.get_policy(policy_id)
+            except exception.PolicyNotFound:
+                msg = _LW('Policy %(policy_id)s referenced in association '
+                          'for endpoint %(endpoint_id)s not found.')
+                LOG.warning(msg, {'policy_id': policy_id,
+                                  'endpoint_id': endpoint_id})
+                raise
+
+        def _look_for_policy_for_region_and_service(endpoint):
+            """Look in the region and its parents for a policy.
+
+            Examine the region of the endpoint for a policy appropriate for
+            the service of the endpoint. If there isn't a match, then chase up
+            the region tree to find one.
+
+            """
+            region_id = endpoint['region_id']
+            regions_examined = []
+            while region_id is not None:
+                try:
+                    ref = self.driver.get_policy_association(
+                        service_id=endpoint['service_id'],
+                        region_id=region_id)
+                    return ref['policy_id']
+                except exception.PolicyAssociationNotFound:
+                    pass
+
+                # There wasn't one for that region & service, let's
+                # chase up the region tree
+                regions_examined.append(region_id)
+                region = self.catalog_api.get_region(region_id)
+                region_id = None
+                if region.get('parent_region_id') is not None:
+                    region_id = region['parent_region_id']
+                    if region_id in regions_examined:
+                        msg = _LE('Circular reference or a repeated entry '
+                                  'found in region tree - %(region_id)s.')
+                        LOG.error(msg, {'region_id': region_id})
+                        break
+
+        # First let's see if there is a policy explicitly defined for
+        # this endpoint.
+
+        try:
+            ref = self.driver.get_policy_association(endpoint_id=endpoint_id)
+            return _get_policy(ref['policy_id'], endpoint_id)
+        except exception.PolicyAssociationNotFound:
+            pass
+
+        # There wasn't a policy explicitly defined for this endpoint, so
+        # now let's see if there is one for the Region & Service.
+
+        endpoint = self.catalog_api.get_endpoint(endpoint_id)
+        policy_id = _look_for_policy_for_region_and_service(endpoint)
+        if policy_id is not None:
+            return _get_policy(policy_id, endpoint_id)
+
+        # Finally, just check if there is one for the service.
+        try:
+            ref = self.driver.get_policy_association(
+                service_id=endpoint['service_id'])
+            return _get_policy(ref['policy_id'], endpoint_id)
+        except exception.PolicyAssociationNotFound:
+            pass
+
+        msg = _('No policy is associated with endpoint '
+                '%(endpoint_id)s.') % {'endpoint_id': endpoint_id}
+        raise exception.NotFound(msg)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    """Interface description for an Endpoint Policy driver."""
+
+    @abc.abstractmethod
+    def create_policy_association(self, policy_id, endpoint_id=None,
+                                  service_id=None, region_id=None):
+        """Creates a policy association.
+
+        :param policy_id: identity of policy that is being associated
+        :type policy_id: string
+        :param endpoint_id: identity of endpoint to associate
+        :type endpoint_id: string
+        :param service_id: identity of the service to associate
+        :type service_id: string
+        :param region_id: identity of the region to associate
+        :type region_id: string
+        :returns: None
+
+        There are three types of association permitted:
+
+        - Endpoint (in which case service and region must be None)
+        - Service and region (in which case endpoint must be None)
+        - Service (in which case endpoint and region must be None)
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def check_policy_association(self, policy_id, endpoint_id=None,
+                                 service_id=None, region_id=None):
+        """Checks existence a policy association.
+
+        :param policy_id: identity of policy that is being associated
+        :type policy_id: string
+        :param endpoint_id: identity of endpoint to associate
+        :type endpoint_id: string
+        :param service_id: identity of the service to associate
+        :type service_id: string
+        :param region_id: identity of the region to associate
+        :type region_id: string
+        :raises: keystone.exception.PolicyAssociationNotFound if there is no
+                 match for the specified association
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_policy_association(self, policy_id, endpoint_id=None,
+                                  service_id=None, region_id=None):
+        """Deletes a policy association.
+
+        :param policy_id: identity of policy that is being associated
+        :type policy_id: string
+        :param endpoint_id: identity of endpoint to associate
+        :type endpoint_id: string
+        :param service_id: identity of the service to associate
+        :type service_id: string
+        :param region_id: identity of the region to associate
+        :type region_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_policy_association(self, endpoint_id=None,
+                               service_id=None, region_id=None):
+        """Gets the policy for an explicit association.
+
+        This method is not exposed as a public API, but is used by
+        get_policy_for_endpoint().
+
+        :param endpoint_id: identity of endpoint
+        :type endpoint_id: string
+        :param service_id: identity of the service
+        :type service_id: string
+        :param region_id: identity of the region
+        :type region_id: string
+        :raises: keystone.exception.PolicyAssociationNotFound if there is no
+                 match for the specified association
+        :returns: dict containing policy_id
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_associations_for_policy(self, policy_id):
+        """List the associations for a policy.
+
+        This method is not exposed as a public API, but is used by
+        list_endpoints_for_policy().
+
+        :param policy_id: identity of policy
+        :type policy_id: string
+        :returns: List of association dicts
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_endpoints_for_policy(self, policy_id):
+        """List all the endpoints using a given policy.
+
+        :param policy_id: identity of policy that is being associated
+        :type policy_id: string
+        :returns: list of endpoints that have an effective association with
+                  that policy
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_policy_for_endpoint(self, endpoint_id):
+        """Get the appropriate policy for a given endpoint.
+
+        :param endpoint_id: identity of endpoint
+        :type endpoint_id: string
+        :returns: Policy entity for the endpoint
+
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_association_by_endpoint(self, endpoint_id):
+        """Removes all the policy associations with the specific endpoint.
+
+        :param endpoint_id: identity of endpoint to check
+        :type endpoint_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_association_by_service(self, service_id):
+        """Removes all the policy associations with the specific service.
+
+        :param service_id: identity of endpoint to check
+        :type service_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_association_by_region(self, region_id):
+        """Removes all the policy associations with the specific region.
+
+        :param region_id: identity of endpoint to check
+        :type region_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_association_by_policy(self, policy_id):
+        """Removes all the policy associations with the specific policy.
+
+        :param policy_id: identity of endpoint to check
+        :type policy_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
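``get_policy_for_endpoint`` resolves a policy in strict precedence: an explicit endpoint association wins, then a (service, region) association found by walking up the region's parents, then a bare service association. A self-contained sketch of that ordering over plain dicts (illustrative, not the driver API)::

    def resolve_policy(assocs, endpoint, regions):
        """assocs maps (endpoint_id, service_id, region_id) -> policy_id."""
        # 1. Explicit endpoint association.
        hit = assocs.get((endpoint['id'], None, None))
        if hit:
            return hit
        # 2. Service + region, chasing parent regions upwards.
        region_id = endpoint['region_id']
        seen = set()
        while region_id is not None and region_id not in seen:
            hit = assocs.get((None, endpoint['service_id'], region_id))
            if hit:
                return hit
            seen.add(region_id)               # guards against cycles
            region_id = regions[region_id].get('parent_region_id')
        # 3. Service alone.
        return assocs.get((None, endpoint['service_id'], None))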
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/__init__.py b/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/migrate.cfg b/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/migrate.cfg
new file mode 100644 (file)
index 0000000..62895d6
--- /dev/null
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=endpoint_policy
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py b/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py
new file mode 100644 (file)
index 0000000..c77e438
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine; bind
+    # migrate_engine to your metadata
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    endpoint_policy_table = sql.Table(
+        'policy_association',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('policy_id', sql.String(64),
+                   nullable=False),
+        sql.Column('endpoint_id', sql.String(64),
+                   nullable=True),
+        sql.Column('service_id', sql.String(64),
+                   nullable=True),
+        sql.Column('region_id', sql.String(64),
+                   nullable=True),
+        sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    endpoint_policy_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    # Operations to reverse the above upgrade go here.
+    table = sql.Table('policy_association', meta, autoload=True)
+    table.drop()
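The table needs a surrogate ``id`` primary key because every column of the natural key can be NULL; note that on most engines a UNIQUE constraint does not prevent two rows whose constrained columns are all NULL, so shape checking is left to ``_assert_valid_association`` in the manager. A quick illustration of the schema (in-memory SQLite, illustrative only)::

    import sqlalchemy as sql

    meta = sql.MetaData()
    assoc = sql.Table(
        'policy_association', meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('policy_id', sql.String(64), nullable=False),
        sql.Column('endpoint_id', sql.String(64), nullable=True),
        sql.Column('service_id', sql.String(64), nullable=True),
        sql.Column('region_id', sql.String(64), nullable=True),
        sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'))

    engine = sql.create_engine('sqlite://')
    meta.create_all(engine)
    engine.execute(assoc.insert().values(
        id='a1', policy_id='p1', endpoint_id='e1'))
    # Inserting a second row with the same (endpoint, service, region)
    # triple raises an IntegrityError.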
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/__init__.py b/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/routers.py b/keystone-moon/keystone/contrib/endpoint_policy/routers.py
new file mode 100644 (file)
index 0000000..999d1ee
--- /dev/null
@@ -0,0 +1,85 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.contrib.endpoint_policy import controllers
+
+
+build_resource_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-ENDPOINT-POLICY', extension_version='1.0')
+
+
+class EndpointPolicyExtension(wsgi.V3ExtensionRouter):
+
+    PATH_PREFIX = '/OS-ENDPOINT-POLICY'
+
+    def add_routes(self, mapper):
+        endpoint_policy_controller = controllers.EndpointPolicyV3Controller()
+
+        self._add_resource(
+            mapper, endpoint_policy_controller,
+            path='/endpoints/{endpoint_id}' + self.PATH_PREFIX + '/policy',
+            get_head_action='get_policy_for_endpoint',
+            rel=build_resource_relation(resource_name='endpoint_policy'),
+            path_vars={'endpoint_id': json_home.Parameters.ENDPOINT_ID})
+        self._add_resource(
+            mapper, endpoint_policy_controller,
+            path='/policies/{policy_id}' + self.PATH_PREFIX + '/endpoints',
+            get_action='list_endpoints_for_policy',
+            rel=build_resource_relation(resource_name='policy_endpoints'),
+            path_vars={'policy_id': json_home.Parameters.POLICY_ID})
+        self._add_resource(
+            mapper, endpoint_policy_controller,
+            path=('/policies/{policy_id}' + self.PATH_PREFIX +
+                  '/endpoints/{endpoint_id}'),
+            get_head_action='check_policy_association_for_endpoint',
+            put_action='create_policy_association_for_endpoint',
+            delete_action='delete_policy_association_for_endpoint',
+            rel=build_resource_relation(
+                resource_name='endpoint_policy_association'),
+            path_vars={
+                'policy_id': json_home.Parameters.POLICY_ID,
+                'endpoint_id': json_home.Parameters.ENDPOINT_ID,
+            })
+        self._add_resource(
+            mapper, endpoint_policy_controller,
+            path=('/policies/{policy_id}' + self.PATH_PREFIX +
+                  '/services/{service_id}'),
+            get_head_action='check_policy_association_for_service',
+            put_action='create_policy_association_for_service',
+            delete_action='delete_policy_association_for_service',
+            rel=build_resource_relation(
+                resource_name='service_policy_association'),
+            path_vars={
+                'policy_id': json_home.Parameters.POLICY_ID,
+                'service_id': json_home.Parameters.SERVICE_ID,
+            })
+        self._add_resource(
+            mapper, endpoint_policy_controller,
+            path=('/policies/{policy_id}' + self.PATH_PREFIX +
+                  '/services/{service_id}/regions/{region_id}'),
+            get_head_action='check_policy_association_for_region_and_service',
+            put_action='create_policy_association_for_region_and_service',
+            delete_action='delete_policy_association_for_region_and_service',
+            rel=build_resource_relation(
+                resource_name='region_and_service_policy_association'),
+            path_vars={
+                'policy_id': json_home.Parameters.POLICY_ID,
+                'service_id': json_home.Parameters.SERVICE_ID,
+                'region_id': json_home.Parameters.REGION_ID,
+            })
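As with the endpoint-filter router, these routes map directly onto HTTP calls. A brief hedged sketch using ``requests``; the URL, token, and the POLICY_ID/ENDPOINT_ID placeholders are assumptions::

    import requests

    KEYSTONE = 'http://localhost:35357/v3'       # assumed admin endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}    # placeholder token

    # Bind POLICY_ID to ENDPOINT_ID, then read back the effective policy.
    requests.put(
        KEYSTONE + '/policies/POLICY_ID/OS-ENDPOINT-POLICY'
                   '/endpoints/ENDPOINT_ID',
        headers=HEADERS)              # create_policy_association_for_endpoint
    r = requests.get(
        KEYSTONE + '/endpoints/ENDPOINT_ID/OS-ENDPOINT-POLICY/policy',
        headers=HEADERS)              # get_policy_for_endpoint
    print(r.json())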
diff --git a/keystone-moon/keystone/contrib/example/__init__.py b/keystone-moon/keystone/contrib/example/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/example/configuration.rst b/keystone-moon/keystone/contrib/example/configuration.rst
new file mode 100644 (file)
index 0000000..979d345
--- /dev/null
@@ -0,0 +1,31 @@
+..
+      Copyright 2013 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+=================
+Extension Example
+=================
+
+Please describe here in detail how to enable your extension:
+
+1. Add the required fields and values in the ``[example]`` section
+   in ``keystone.conf``.
+
+2. Optional: add the required ``filter`` to the ``pipeline`` in ``keystone-paste.ini``.
+
+3. Optional: create the extension tables if using the provided SQL backend. Example::
+
+
+    ./bin/keystone-manage db_sync --extension example
\ No newline at end of file
diff --git a/keystone-moon/keystone/contrib/example/controllers.py b/keystone-moon/keystone/contrib/example/controllers.py
new file mode 100644 (file)
index 0000000..95b3e82
--- /dev/null
@@ -0,0 +1,26 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from keystone.common import controller
+from keystone.common import dependency
+
+
+@dependency.requires('example_api')
+class ExampleV3Controller(controller.V3Controller):
+
+    @controller.protected()
+    def example_get(self, context):
+        """Description of the controller logic."""
+        self.example_api.do_something(context)
diff --git a/keystone-moon/keystone/contrib/example/core.py b/keystone-moon/keystone/contrib/example/core.py
new file mode 100644 (file)
index 0000000..6e85c7f
--- /dev/null
@@ -0,0 +1,92 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _LI
+from keystone import notifications
+
+
+LOG = log.getLogger(__name__)
+
+
+@dependency.provider('example_api')
+class ExampleManager(manager.Manager):
+    """Example Manager.
+
+    See :mod:`keystone.common.manager.Manager` for more details on
+    how this dynamically calls the backend.
+
+    """
+
+    def __init__(self):
+        # The following is an example of event callbacks. In this setup,
+        # ExampleManager's data model depends on the project data model.
+        # It must create additional aggregates when a new project is created,
+        # and it must clean up data related to the project whenever a project
+        # has been deleted.
+        #
+        # In this example, the project_deleted_callback will be invoked
+        # whenever a project has been deleted. Similarly, the
+        # project_created_callback will be invoked whenever a new project is
+        # created.
+
+        # This information is used when the @dependency.provider decorator acts
+        # on the class.
+        self.event_callbacks = {
+            notifications.ACTIONS.deleted: {
+                'project': [self.project_deleted_callback],
+            },
+            notifications.ACTIONS.created: {
+                'project': [self.project_created_callback],
+            },
+        }
+        super(ExampleManager, self).__init__(
+            'keystone.contrib.example.core.ExampleDriver')
+
+    def project_deleted_callback(self, service, resource_type, operation,
+                                 payload):
+        # The code below is merely an example.
+        msg = _LI('Received the following notification: service %(service)s, '
+                  'resource_type: %(resource_type)s, operation %(operation)s '
+                  'payload %(payload)s')
+        LOG.info(msg, {'service': service, 'resource_type': resource_type,
+                       'operation': operation, 'payload': payload})
+
+    def project_created_callback(self, service, resource_type, operation,
+                                 payload):
+        # The code below is merely an example.
+        msg = _LI('Received the following notification: service %(service)s, '
+                  'resource_type: %(resource_type)s, operation %(operation)s '
+                  'payload %(payload)s')
+        LOG.info(msg, {'service': service, 'resource_type': resource_type,
+                       'operation': operation, 'payload': payload})
+
+
+class ExampleDriver(object):
+    """Interface description for Example driver."""
+
+    def do_something(self, data):
+        """Do something
+
+        :param data: example data
+        :type data: string
+        :raises: keystone.exception
+        :returns: None
+
+        """
+        raise exception.NotImplemented()
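+
+
+# A minimal concrete driver is sketched below for illustration; the class
+# name and its in-memory storage are assumptions, not part of the extension.
+class InMemoryExampleDriver(ExampleDriver):
+    """Illustrative ExampleDriver that records calls in memory."""
+
+    def __init__(self):
+        self._calls = []
+
+    def do_something(self, data):
+        # Record the payload instead of raising NotImplemented.
+        self._calls.append(data)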
diff --git a/keystone-moon/keystone/contrib/example/migrate_repo/__init__.py b/keystone-moon/keystone/contrib/example/migrate_repo/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/example/migrate_repo/migrate.cfg b/keystone-moon/keystone/contrib/example/migrate_repo/migrate.cfg
new file mode 100644 (file)
index 0000000..5b1b1c0
--- /dev/null
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=example
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/contrib/example/migrate_repo/versions/001_example_table.py b/keystone-moon/keystone/contrib/example/migrate_repo/versions/001_example_table.py
new file mode 100644 (file)
index 0000000..10b7ccc
--- /dev/null
@@ -0,0 +1,43 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine; bind
+    # migrate_engine to your metadata
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    # example table
+
+    service_table = sql.Table(
+        'example',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('type', sql.String(255)),
+        sql.Column('extra', sql.Text()))
+    service_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    tables = ['example']
+    for t in tables:
+        table = sql.Table(t, meta, autoload=True)
+        table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/contrib/example/migrate_repo/versions/__init__.py b/keystone-moon/keystone/contrib/example/migrate_repo/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/example/routers.py b/keystone-moon/keystone/contrib/example/routers.py
new file mode 100644 (file)
index 0000000..30cffe1
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.contrib.example import controllers
+
+
+build_resource_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-EXAMPLE', extension_version='1.0')
+
+
+class ExampleRouter(wsgi.V3ExtensionRouter):
+
+    PATH_PREFIX = '/OS-EXAMPLE'
+
+    def add_routes(self, mapper):
+        example_controller = controllers.ExampleV3Controller()
+
+        self._add_resource(
+            mapper, example_controller,
+            path=self.PATH_PREFIX + '/example',
+            get_action='do_something',
+            rel=build_resource_relation(resource_name='example'))
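+
+
+# Illustrative only: once this router is wired into the paste pipeline, the
+# resource above would be reachable at (host and port are assumptions):
+#
+#     curl -H "X-Auth-Token: $TOKEN" \
+#          http://localhost:35357/v3/OS-EXAMPLE/example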
diff --git a/keystone-moon/keystone/contrib/federation/__init__.py b/keystone-moon/keystone/contrib/federation/__init__.py
new file mode 100644 (file)
index 0000000..57c9e42
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.federation.core import *  # noqa
diff --git a/keystone-moon/keystone/contrib/federation/backends/__init__.py b/keystone-moon/keystone/contrib/federation/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/federation/backends/sql.py b/keystone-moon/keystone/contrib/federation/backends/sql.py
new file mode 100644 (file)
index 0000000..f2c124d
--- /dev/null
@@ -0,0 +1,315 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils
+
+from keystone.common import sql
+from keystone.contrib.federation import core
+from keystone import exception
+
+
+class FederationProtocolModel(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'federation_protocol'
+    attributes = ['id', 'idp_id', 'mapping_id']
+    mutable_attributes = frozenset(['mapping_id'])
+
+    id = sql.Column(sql.String(64), primary_key=True)
+    idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id',
+                        ondelete='CASCADE'), primary_key=True)
+    mapping_id = sql.Column(sql.String(64), nullable=False)
+
+    @classmethod
+    def from_dict(cls, dictionary):
+        new_dictionary = dictionary.copy()
+        return cls(**new_dictionary)
+
+    def to_dict(self):
+        """Return a dictionary with model's attributes."""
+        d = dict()
+        for attr in self.__class__.attributes:
+            d[attr] = getattr(self, attr)
+        return d
+
+
+class IdentityProviderModel(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'identity_provider'
+    attributes = ['id', 'remote_id', 'enabled', 'description']
+    mutable_attributes = frozenset(['description', 'enabled', 'remote_id'])
+
+    id = sql.Column(sql.String(64), primary_key=True)
+    remote_id = sql.Column(sql.String(256), nullable=True)
+    enabled = sql.Column(sql.Boolean, nullable=False)
+    description = sql.Column(sql.Text(), nullable=True)
+
+    @classmethod
+    def from_dict(cls, dictionary):
+        new_dictionary = dictionary.copy()
+        return cls(**new_dictionary)
+
+    def to_dict(self):
+        """Return a dictionary with model's attributes."""
+        d = dict()
+        for attr in self.__class__.attributes:
+            d[attr] = getattr(self, attr)
+        return d
+
+
+class MappingModel(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'mapping'
+    attributes = ['id', 'rules']
+
+    id = sql.Column(sql.String(64), primary_key=True)
+    rules = sql.Column(sql.JsonBlob(), nullable=False)
+
+    @classmethod
+    def from_dict(cls, dictionary):
+        new_dictionary = dictionary.copy()
+        return cls(**new_dictionary)
+
+    def to_dict(self):
+        """Return a dictionary with model's attributes."""
+        d = dict()
+        for attr in self.__class__.attributes:
+            d[attr] = getattr(self, attr)
+        return d
+
+
+class ServiceProviderModel(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'service_provider'
+    attributes = ['auth_url', 'id', 'enabled', 'description', 'sp_url']
+    mutable_attributes = frozenset(['auth_url', 'description', 'enabled',
+                                    'sp_url'])
+
+    id = sql.Column(sql.String(64), primary_key=True)
+    enabled = sql.Column(sql.Boolean, nullable=False)
+    description = sql.Column(sql.Text(), nullable=True)
+    auth_url = sql.Column(sql.String(256), nullable=False)
+    sp_url = sql.Column(sql.String(256), nullable=False)
+
+    @classmethod
+    def from_dict(cls, dictionary):
+        new_dictionary = dictionary.copy()
+        return cls(**new_dictionary)
+
+    def to_dict(self):
+        """Return a dictionary with model's attributes."""
+        d = dict()
+        for attr in self.__class__.attributes:
+            d[attr] = getattr(self, attr)
+        return d
+
+
+class Federation(core.Driver):
+
+    # Identity Provider CRUD
+    @sql.handle_conflicts(conflict_type='identity_provider')
+    def create_idp(self, idp_id, idp):
+        idp['id'] = idp_id
+        with sql.transaction() as session:
+            idp_ref = IdentityProviderModel.from_dict(idp)
+            session.add(idp_ref)
+        return idp_ref.to_dict()
+
+    def delete_idp(self, idp_id):
+        with sql.transaction() as session:
+            idp_ref = self._get_idp(session, idp_id)
+            session.delete(idp_ref)
+
+    def _get_idp(self, session, idp_id):
+        idp_ref = session.query(IdentityProviderModel).get(idp_id)
+        if not idp_ref:
+            raise exception.IdentityProviderNotFound(idp_id=idp_id)
+        return idp_ref
+
+    def _get_idp_from_remote_id(self, session, remote_id):
+        q = session.query(IdentityProviderModel)
+        q = q.filter_by(remote_id=remote_id)
+        try:
+            return q.one()
+        except sql.NotFound:
+            raise exception.IdentityProviderNotFound(idp_id=remote_id)
+
+    def list_idps(self):
+        with sql.transaction() as session:
+            idps = session.query(IdentityProviderModel)
+        idps_list = [idp.to_dict() for idp in idps]
+        return idps_list
+
+    def get_idp(self, idp_id):
+        with sql.transaction() as session:
+            idp_ref = self._get_idp(session, idp_id)
+        return idp_ref.to_dict()
+
+    def get_idp_from_remote_id(self, remote_id):
+        with sql.transaction() as session:
+            idp_ref = self._get_idp_from_remote_id(session, remote_id)
+        return idp_ref.to_dict()
+
+    def update_idp(self, idp_id, idp):
+        with sql.transaction() as session:
+            idp_ref = self._get_idp(session, idp_id)
+            old_idp = idp_ref.to_dict()
+            old_idp.update(idp)
+            new_idp = IdentityProviderModel.from_dict(old_idp)
+            for attr in IdentityProviderModel.mutable_attributes:
+                setattr(idp_ref, attr, getattr(new_idp, attr))
+        return idp_ref.to_dict()
+
+    # Protocol CRUD
+    def _get_protocol(self, session, idp_id, protocol_id):
+        q = session.query(FederationProtocolModel)
+        q = q.filter_by(id=protocol_id, idp_id=idp_id)
+        try:
+            return q.one()
+        except sql.NotFound:
+            kwargs = {'protocol_id': protocol_id,
+                      'idp_id': idp_id}
+            raise exception.FederatedProtocolNotFound(**kwargs)
+
+    @sql.handle_conflicts(conflict_type='federation_protocol')
+    def create_protocol(self, idp_id, protocol_id, protocol):
+        protocol['id'] = protocol_id
+        protocol['idp_id'] = idp_id
+        with sql.transaction() as session:
+            self._get_idp(session, idp_id)
+            protocol_ref = FederationProtocolModel.from_dict(protocol)
+            session.add(protocol_ref)
+        return protocol_ref.to_dict()
+
+    def update_protocol(self, idp_id, protocol_id, protocol):
+        with sql.transaction() as session:
+            proto_ref = self._get_protocol(session, idp_id, protocol_id)
+            old_proto = proto_ref.to_dict()
+            old_proto.update(protocol)
+            new_proto = FederationProtocolModel.from_dict(old_proto)
+            for attr in FederationProtocolModel.mutable_attributes:
+                setattr(proto_ref, attr, getattr(new_proto, attr))
+        return proto_ref.to_dict()
+
+    def get_protocol(self, idp_id, protocol_id):
+        with sql.transaction() as session:
+            protocol_ref = self._get_protocol(session, idp_id, protocol_id)
+        return protocol_ref.to_dict()
+
+    def list_protocols(self, idp_id):
+        with sql.transaction() as session:
+            q = session.query(FederationProtocolModel)
+            q = q.filter_by(idp_id=idp_id)
+        protocols = [protocol.to_dict() for protocol in q]
+        return protocols
+
+    def delete_protocol(self, idp_id, protocol_id):
+        with sql.transaction() as session:
+            key_ref = self._get_protocol(session, idp_id, protocol_id)
+            session.delete(key_ref)
+
+    # Mapping CRUD
+    def _get_mapping(self, session, mapping_id):
+        mapping_ref = session.query(MappingModel).get(mapping_id)
+        if not mapping_ref:
+            raise exception.MappingNotFound(mapping_id=mapping_id)
+        return mapping_ref
+
+    @sql.handle_conflicts(conflict_type='mapping')
+    def create_mapping(self, mapping_id, mapping):
+        ref = {}
+        ref['id'] = mapping_id
+        ref['rules'] = jsonutils.dumps(mapping.get('rules'))
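+        # An illustrative (assumed) value for mapping['rules'], following
+        # the OS-FEDERATION mapping format, would be:
+        #
+        #     [{"local": [{"user": {"name": "{0}"}}],
+        #       "remote": [{"type": "REMOTE_USER"}]}]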
+        with sql.transaction() as session:
+            mapping_ref = MappingModel.from_dict(ref)
+            session.add(mapping_ref)
+        return mapping_ref.to_dict()
+
+    def delete_mapping(self, mapping_id):
+        with sql.transaction() as session:
+            mapping_ref = self._get_mapping(session, mapping_id)
+            session.delete(mapping_ref)
+
+    def list_mappings(self):
+        with sql.transaction() as session:
+            mappings = session.query(MappingModel)
+        return [x.to_dict() for x in mappings]
+
+    def get_mapping(self, mapping_id):
+        with sql.transaction() as session:
+            mapping_ref = self._get_mapping(session, mapping_id)
+        return mapping_ref.to_dict()
+
+    @sql.handle_conflicts(conflict_type='mapping')
+    def update_mapping(self, mapping_id, mapping):
+        ref = {}
+        ref['id'] = mapping_id
+        ref['rules'] = jsonutils.dumps(mapping.get('rules'))
+        with sql.transaction() as session:
+            mapping_ref = self._get_mapping(session, mapping_id)
+            old_mapping = mapping_ref.to_dict()
+            old_mapping.update(ref)
+            new_mapping = MappingModel.from_dict(old_mapping)
+            for attr in MappingModel.attributes:
+                setattr(mapping_ref, attr, getattr(new_mapping, attr))
+        return mapping_ref.to_dict()
+
+    def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
+        with sql.transaction() as session:
+            protocol_ref = self._get_protocol(session, idp_id, protocol_id)
+            mapping_id = protocol_ref.mapping_id
+            mapping_ref = self._get_mapping(session, mapping_id)
+        return mapping_ref.to_dict()
+
+    # Service Provider CRUD
+    @sql.handle_conflicts(conflict_type='service_provider')
+    def create_sp(self, sp_id, sp):
+        sp['id'] = sp_id
+        with sql.transaction() as session:
+            sp_ref = ServiceProviderModel.from_dict(sp)
+            session.add(sp_ref)
+        return sp_ref.to_dict()
+
+    def delete_sp(self, sp_id):
+        with sql.transaction() as session:
+            sp_ref = self._get_sp(session, sp_id)
+            session.delete(sp_ref)
+
+    def _get_sp(self, session, sp_id):
+        sp_ref = session.query(ServiceProviderModel).get(sp_id)
+        if not sp_ref:
+            raise exception.ServiceProviderNotFound(sp_id=sp_id)
+        return sp_ref
+
+    def list_sps(self):
+        with sql.transaction() as session:
+            sps = session.query(ServiceProviderModel)
+        sps_list = [sp.to_dict() for sp in sps]
+        return sps_list
+
+    def get_sp(self, sp_id):
+        with sql.transaction() as session:
+            sp_ref = self._get_sp(session, sp_id)
+        return sp_ref.to_dict()
+
+    def update_sp(self, sp_id, sp):
+        with sql.transaction() as session:
+            sp_ref = self._get_sp(session, sp_id)
+            old_sp = sp_ref.to_dict()
+            old_sp.update(sp)
+            new_sp = ServiceProviderModel.from_dict(old_sp)
+            for attr in ServiceProviderModel.mutable_attributes:
+                setattr(sp_ref, attr, getattr(new_sp, attr))
+        return sp_ref.to_dict()
+
+    def get_enabled_service_providers(self):
+        with sql.transaction() as session:
+            service_providers = session.query(ServiceProviderModel)
+            service_providers = service_providers.filter_by(enabled=True)
+        return service_providers
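+
+
+def _illustrative_idp_round_trip(driver):
+    # Sketch only, not part of the backend: exercise the identity provider
+    # CRUD defined above, assuming the SQL engine and the federation tables
+    # have been set up.
+    driver.create_idp('acme', {'enabled': True,
+                               'remote_id': 'https://idp.acme.test',
+                               'description': 'demo IdP'})
+    assert driver.get_idp('acme')['enabled'] is True
+    driver.delete_idp('acme')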
diff --git a/keystone-moon/keystone/contrib/federation/controllers.py b/keystone-moon/keystone/contrib/federation/controllers.py
new file mode 100644 (file)
index 0000000..6066a33
--- /dev/null
@@ -0,0 +1,457 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Extensions supporting Federation."""
+
+import string
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+from six.moves import urllib
+import webob
+
+from keystone.auth import controllers as auth_controllers
+from keystone.common import authorization
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import validation
+from keystone.common import wsgi
+from keystone.contrib.federation import idp as keystone_idp
+from keystone.contrib.federation import schema
+from keystone.contrib.federation import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import token_model
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class _ControllerBase(controller.V3Controller):
+    """Base behaviors for federation controllers."""
+
+    @classmethod
+    def base_url(cls, context, path=None):
+        """Construct a path and pass it to V3Controller.base_url method."""
+
+        path = '/OS-FEDERATION/' + cls.collection_name
+        return super(_ControllerBase, cls).base_url(context, path=path)
+
+
+@dependency.requires('federation_api')
+class IdentityProvider(_ControllerBase):
+    """Identity Provider representation."""
+    collection_name = 'identity_providers'
+    member_name = 'identity_provider'
+
+    _mutable_parameters = frozenset(['description', 'enabled', 'remote_id'])
+    _public_parameters = frozenset(['id', 'enabled', 'description',
+                                    'remote_id', 'links'])
+
+    @classmethod
+    def _add_related_links(cls, context, ref):
+        """Add URLs for entities related with Identity Provider.
+
+        Add URLs pointing to:
+        - protocols tied to the Identity Provider
+
+        """
+        ref.setdefault('links', {})
+        base_path = ref['links'].get('self')
+        if base_path is None:
+            base_path = '/'.join([IdentityProvider.base_url(context),
+                                  ref['id']])
+        for name in ['protocols']:
+            ref['links'][name] = '/'.join([base_path, name])
+
+    @classmethod
+    def _add_self_referential_link(cls, context, ref):
+        id = ref.get('id')
+        self_path = '/'.join([cls.base_url(context), id])
+        ref.setdefault('links', {})
+        ref['links']['self'] = self_path
+
+    @classmethod
+    def wrap_member(cls, context, ref):
+        cls._add_self_referential_link(context, ref)
+        cls._add_related_links(context, ref)
+        ref = cls.filter_params(ref)
+        return {cls.member_name: ref}
+
+    @controller.protected()
+    def create_identity_provider(self, context, idp_id, identity_provider):
+        identity_provider = self._normalize_dict(identity_provider)
+        identity_provider.setdefault('enabled', False)
+        IdentityProvider.check_immutable_params(identity_provider)
+        idp_ref = self.federation_api.create_idp(idp_id, identity_provider)
+        response = IdentityProvider.wrap_member(context, idp_ref)
+        return wsgi.render_response(body=response, status=('201', 'Created'))
+
+    @controller.protected()
+    def list_identity_providers(self, context):
+        ref = self.federation_api.list_idps()
+        ref = [self.filter_params(x) for x in ref]
+        return IdentityProvider.wrap_collection(context, ref)
+
+    @controller.protected()
+    def get_identity_provider(self, context, idp_id):
+        ref = self.federation_api.get_idp(idp_id)
+        return IdentityProvider.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_identity_provider(self, context, idp_id):
+        self.federation_api.delete_idp(idp_id)
+
+    @controller.protected()
+    def update_identity_provider(self, context, idp_id, identity_provider):
+        identity_provider = self._normalize_dict(identity_provider)
+        IdentityProvider.check_immutable_params(identity_provider)
+        idp_ref = self.federation_api.update_idp(idp_id, identity_provider)
+        return IdentityProvider.wrap_member(context, idp_ref)
+
+
+@dependency.requires('federation_api')
+class FederationProtocol(_ControllerBase):
+    """A federation protocol representation.
+
+    See IdentityProvider docstring for explanation on _mutable_parameters
+    and _public_parameters class attributes.
+
+    """
+    collection_name = 'protocols'
+    member_name = 'protocol'
+
+    _public_parameters = frozenset(['id', 'mapping_id', 'links'])
+    _mutable_parameters = frozenset(['mapping_id'])
+
+    @classmethod
+    def _add_self_referential_link(cls, context, ref):
+        """Add 'links' entry to the response dictionary.
+
+        Calls the IdentityProvider.base_url() class method, as it
+        constructs the proper URL, with the 'identity providers' part
+        included.
+
+        :param ref: response dictionary
+
+        """
+        ref.setdefault('links', {})
+        base_path = ref['links'].get('identity_provider')
+        if base_path is None:
+            base_path = [IdentityProvider.base_url(context), ref['idp_id']]
+            base_path = '/'.join(base_path)
+        self_path = [base_path, 'protocols', ref['id']]
+        self_path = '/'.join(self_path)
+        ref['links']['self'] = self_path
+
+    @classmethod
+    def _add_related_links(cls, context, ref):
+        """Add new entries to the 'links' subdictionary in the response.
+
+        Adds an 'identity_provider' key whose value is a URL pointing to
+        the related identity provider.
+
+        :param ref: response dictionary
+
+        """
+        ref.setdefault('links', {})
+        base_path = '/'.join([IdentityProvider.base_url(context),
+                              ref['idp_id']])
+        ref['links']['identity_provider'] = base_path
+
+    @classmethod
+    def wrap_member(cls, context, ref):
+        cls._add_related_links(context, ref)
+        cls._add_self_referential_link(context, ref)
+        ref = cls.filter_params(ref)
+        return {cls.member_name: ref}
+
+    @controller.protected()
+    def create_protocol(self, context, idp_id, protocol_id, protocol):
+        ref = self._normalize_dict(protocol)
+        FederationProtocol.check_immutable_params(ref)
+        ref = self.federation_api.create_protocol(idp_id, protocol_id, ref)
+        response = FederationProtocol.wrap_member(context, ref)
+        return wsgi.render_response(body=response, status=('201', 'Created'))
+
+    @controller.protected()
+    def update_protocol(self, context, idp_id, protocol_id, protocol):
+        ref = self._normalize_dict(protocol)
+        FederationProtocol.check_immutable_params(ref)
+        ref = self.federation_api.update_protocol(idp_id, protocol_id,
+                                                  ref)
+        return FederationProtocol.wrap_member(context, ref)
+
+    @controller.protected()
+    def get_protocol(self, context, idp_id, protocol_id):
+        ref = self.federation_api.get_protocol(idp_id, protocol_id)
+        return FederationProtocol.wrap_member(context, ref)
+
+    @controller.protected()
+    def list_protocols(self, context, idp_id):
+        protocols_ref = self.federation_api.list_protocols(idp_id)
+        protocols = list(protocols_ref)
+        return FederationProtocol.wrap_collection(context, protocols)
+
+    @controller.protected()
+    def delete_protocol(self, context, idp_id, protocol_id):
+        self.federation_api.delete_protocol(idp_id, protocol_id)
+
+
+@dependency.requires('federation_api')
+class MappingController(_ControllerBase):
+    collection_name = 'mappings'
+    member_name = 'mapping'
+
+    @controller.protected()
+    def create_mapping(self, context, mapping_id, mapping):
+        ref = self._normalize_dict(mapping)
+        utils.validate_mapping_structure(ref)
+        mapping_ref = self.federation_api.create_mapping(mapping_id, ref)
+        response = MappingController.wrap_member(context, mapping_ref)
+        return wsgi.render_response(body=response, status=('201', 'Created'))
+
+    @controller.protected()
+    def list_mappings(self, context):
+        ref = self.federation_api.list_mappings()
+        return MappingController.wrap_collection(context, ref)
+
+    @controller.protected()
+    def get_mapping(self, context, mapping_id):
+        ref = self.federation_api.get_mapping(mapping_id)
+        return MappingController.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_mapping(self, context, mapping_id):
+        self.federation_api.delete_mapping(mapping_id)
+
+    @controller.protected()
+    def update_mapping(self, context, mapping_id, mapping):
+        mapping = self._normalize_dict(mapping)
+        utils.validate_mapping_structure(mapping)
+        mapping_ref = self.federation_api.update_mapping(mapping_id, mapping)
+        return MappingController.wrap_member(context, mapping_ref)
+
+
+@dependency.requires('federation_api')
+class Auth(auth_controllers.Auth):
+
+    def federated_authentication(self, context, identity_provider, protocol):
+        """Authenticate from dedicated url endpoint.
+
+        Build HTTP request body for federated authentication and inject
+        it into the ``authenticate_for_token`` function.
+
+        """
+        auth = {
+            'identity': {
+                'methods': [protocol],
+                protocol: {
+                    'identity_provider': identity_provider,
+                    'protocol': protocol
+                }
+            }
+        }
+
+        return self.authenticate_for_token(context, auth=auth)
+
+    def federated_sso_auth(self, context, protocol_id):
+        try:
+            remote_id_name = CONF.federation.remote_id_attribute
+            remote_id = context['environment'][remote_id_name]
+        except KeyError:
+            msg = _('Missing entity ID from environment')
+            LOG.error(msg)
+            raise exception.Unauthorized(msg)
+
+        if 'origin' in context['query_string']:
+            origin = context['query_string'].get('origin')
+            host = urllib.parse.unquote_plus(origin)
+        else:
+            msg = _('Request must have an origin query parameter')
+            LOG.error(msg)
+            raise exception.ValidationError(msg)
+
+        if host in CONF.federation.trusted_dashboard:
+            ref = self.federation_api.get_idp_from_remote_id(remote_id)
+            identity_provider = ref['id']
+            res = self.federated_authentication(context, identity_provider,
+                                                protocol_id)
+            token_id = res.headers['X-Subject-Token']
+            return self.render_html_response(host, token_id)
+        else:
+            msg = _('%(host)s is not a trusted dashboard host')
+            msg = msg % {'host': host}
+            LOG.error(msg)
+            raise exception.Unauthorized(msg)
+
+    def render_html_response(self, host, token_id):
+        """Forms an HTML Form from a template with autosubmit."""
+
+        headers = [('Content-Type', 'text/html')]
+
+        with open(CONF.federation.sso_callback_template) as template:
+            src = string.Template(template.read())
+
+        subs = {'host': host, 'token': token_id}
+        body = src.substitute(subs)
+        return webob.Response(body=body, status='200',
+                              headerlist=headers)
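+
+    # A minimal sso_callback_template might look like the following
+    # (illustrative sketch only; the actual template is whatever file is
+    # configured via CONF.federation.sso_callback_template):
+    #
+    #     <html><body onload="document.forms[0].submit()">
+    #       <form method="POST" action="$host">
+    #         <input type="hidden" name="token" value="$token"/>
+    #       </form>
+    #     </body></html>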
+
+    @validation.validated(schema.saml_create, 'auth')
+    def create_saml_assertion(self, context, auth):
+        """Exchange a scoped token for a SAML assertion.
+
+        :param auth: Dictionary that contains a token and service provider id
+        :returns: SAML Assertion based on properties from the token
+        """
+
+        issuer = CONF.saml.idp_entity_id
+        sp_id = auth['scope']['service_provider']['id']
+        service_provider = self.federation_api.get_sp(sp_id)
+        utils.assert_enabled_service_provider_object(service_provider)
+
+        sp_url = service_provider.get('sp_url')
+        auth_url = service_provider.get('auth_url')
+
+        token_id = auth['identity']['token']['id']
+        token_data = self.token_provider_api.validate_token(token_id)
+        token_ref = token_model.KeystoneToken(token_id, token_data)
+        subject = token_ref.user_name
+        roles = token_ref.role_names
+
+        if not token_ref.project_scoped:
+            action = _('Use a project scoped token when attempting to create '
+                       'a SAML assertion')
+            raise exception.ForbiddenAction(action=action)
+
+        project = token_ref.project_name
+        generator = keystone_idp.SAMLGenerator()
+        response = generator.samlize_token(issuer, sp_url, subject, roles,
+                                           project)
+
+        return wsgi.render_response(body=response.to_string(),
+                                    status=('200', 'OK'),
+                                    headers=[('Content-Type', 'text/xml'),
+                                             ('X-sp-url',
+                                              six.binary_type(sp_url)),
+                                             ('X-auth-url',
+                                              six.binary_type(auth_url))])
+
+
+@dependency.requires('assignment_api', 'resource_api')
+class DomainV3(controller.V3Controller):
+    collection_name = 'domains'
+    member_name = 'domain'
+
+    def __init__(self):
+        super(DomainV3, self).__init__()
+        self.get_member_from_driver = self.resource_api.get_domain
+
+    @controller.protected()
+    def list_domains_for_groups(self, context):
+        """List all domains available to an authenticated user's groups.
+
+        :param context: request context
+        :returns: list of accessible domains
+
+        """
+        auth_context = context['environment'][authorization.AUTH_CONTEXT_ENV]
+        domains = self.assignment_api.list_domains_for_groups(
+            auth_context['group_ids'])
+        return DomainV3.wrap_collection(context, domains)
+
+
+@dependency.requires('assignment_api', 'resource_api')
+class ProjectAssignmentV3(controller.V3Controller):
+    collection_name = 'projects'
+    member_name = 'project'
+
+    def __init__(self):
+        super(ProjectAssignmentV3, self).__init__()
+        self.get_member_from_driver = self.resource_api.get_project
+
+    @controller.protected()
+    def list_projects_for_groups(self, context):
+        """List all projects available to an authenticated user's groups.
+
+        :param context: request context
+        :returns: list of accessible projects
+
+        """
+        auth_context = context['environment'][authorization.AUTH_CONTEXT_ENV]
+        projects = self.assignment_api.list_projects_for_groups(
+            auth_context['group_ids'])
+        return ProjectAssignmentV3.wrap_collection(context, projects)
+
+
+@dependency.requires('federation_api')
+class ServiceProvider(_ControllerBase):
+    """Service Provider representation."""
+
+    collection_name = 'service_providers'
+    member_name = 'service_provider'
+
+    _mutable_parameters = frozenset(['auth_url', 'description', 'enabled',
+                                     'sp_url'])
+    _public_parameters = frozenset(['auth_url', 'id', 'enabled', 'description',
+                                    'links', 'sp_url'])
+
+    @controller.protected()
+    @validation.validated(schema.service_provider_create, 'service_provider')
+    def create_service_provider(self, context, sp_id, service_provider):
+        service_provider = self._normalize_dict(service_provider)
+        service_provider.setdefault('enabled', False)
+        ServiceProvider.check_immutable_params(service_provider)
+        sp_ref = self.federation_api.create_sp(sp_id, service_provider)
+        response = ServiceProvider.wrap_member(context, sp_ref)
+        return wsgi.render_response(body=response, status=('201', 'Created'))
+
+    @controller.protected()
+    def list_service_providers(self, context):
+        ref = self.federation_api.list_sps()
+        ref = [self.filter_params(x) for x in ref]
+        return ServiceProvider.wrap_collection(context, ref)
+
+    @controller.protected()
+    def get_service_provider(self, context, sp_id):
+        ref = self.federation_api.get_sp(sp_id)
+        return ServiceProvider.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_service_provider(self, context, sp_id):
+        self.federation_api.delete_sp(sp_id)
+
+    @controller.protected()
+    @validation.validated(schema.service_provider_update, 'service_provider')
+    def update_service_provider(self, context, sp_id, service_provider):
+        service_provider = self._normalize_dict(service_provider)
+        ServiceProvider.check_immutable_params(service_provider)
+        sp_ref = self.federation_api.update_sp(sp_id, service_provider)
+        return ServiceProvider.wrap_member(context, sp_ref)
+
+
+class SAMLMetadataV3(_ControllerBase):
+    member_name = 'metadata'
+
+    def get_metadata(self, context):
+        metadata_path = CONF.saml.idp_metadata_path
+        try:
+            with open(metadata_path, 'r') as metadata_handler:
+                metadata = metadata_handler.read()
+        except IOError as e:
+            # Raise HTTP 500 in case the metadata file cannot be read.
+            raise exception.MetadataFileError(reason=e)
+        return wsgi.render_response(body=metadata, status=('200', 'OK'),
+                                    headers=[('Content-Type', 'text/xml')])
diff --git a/keystone-moon/keystone/contrib/federation/core.py b/keystone-moon/keystone/contrib/federation/core.py
new file mode 100644 (file)
index 0000000..b596cff
--- /dev/null
@@ -0,0 +1,346 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Extension supporting Federation."""
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import six
+
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import manager
+from keystone import exception
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+EXTENSION_DATA = {
+    'name': 'OpenStack Federation APIs',
+    'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                 'OS-FEDERATION/v1.0',
+    'alias': 'OS-FEDERATION',
+    'updated': '2013-12-17T12:00:00-00:00',
+    'description': 'OpenStack Identity Providers Mechanism.',
+    'links': [{
+        'rel': 'describedby',
+        'type': 'text/html',
+        'href': 'https://github.com/openstack/identity-api'
+    }]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+
+FEDERATION = 'OS-FEDERATION'
+IDENTITY_PROVIDER = 'OS-FEDERATION:identity_provider'
+PROTOCOL = 'OS-FEDERATION:protocol'
+FEDERATED_DOMAIN_KEYWORD = 'Federated'
+
+
+@dependency.provider('federation_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Federation backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+    def __init__(self):
+        super(Manager, self).__init__(CONF.federation.driver)
+
+    def get_enabled_service_providers(self):
+        """List enabled service providers for Service Catalog
+
+        Service Provider in a catalog contains three attributes: ``id``,
+        ``auth_url``, ``sp_url``, where:
+
+        - id is an unique, user defined identifier for service provider object
+        - auth_url is a authentication URL of remote Keystone
+        - sp_url a URL accessible at the remote service provider where SAML
+          assertion is transmitted.
+
+        :returns: list of dictionaries with enabled service providers
+        :rtype: list of dicts
+
+        """
+        def normalize(sp):
+            ref = {
+                'auth_url': sp.auth_url,
+                'id': sp.id,
+                'sp_url': sp.sp_url
+            }
+            return ref
+
+        service_providers = self.driver.get_enabled_service_providers()
+        return [normalize(sp) for sp in service_providers]
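+
+    # An illustrative catalog entry produced by the method above (all
+    # values below are assumed):
+    #
+    #     {'id': 'sp1',
+    #      'auth_url': 'https://remote.keystone.test:5000/v3/OS-FEDERATION/'
+    #                  'identity_providers/acme/protocols/saml2/auth',
+    #      'sp_url': 'https://remote.sp.test/Shibboleth.sso/SAML2/ECP'}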
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+
+    @abc.abstractmethod
+    def create_idp(self, idp_id, idp):
+        """Create an identity provider.
+
+        :returns: idp_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_idp(self, idp_id):
+        """Delete an identity provider.
+
+        :raises: keystone.exception.IdentityProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_idps(self):
+        """List all identity providers.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_idp(self, idp_id):
+        """Get an identity provider by ID.
+
+        :raises: keystone.exception.IdentityProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_idp_from_remote_id(self, remote_id):
+        """Get an identity provider by remote ID.
+
+        :raises: keystone.exception.IdentityProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_idp(self, idp_id, idp):
+        """Update an identity provider by ID.
+
+        :raises: keystone.exception.IdentityProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_protocol(self, idp_id, protocol_id, protocol):
+        """Add an IdP-Protocol configuration.
+
+        :raises: keystone.exception.IdentityProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_protocol(self, idp_id, protocol_id, protocol):
+        """Change an IdP-Protocol configuration.
+
+        :raises: keystone.exception.IdentityProviderNotFound,
+                 keystone.exception.FederatedProtocolNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_protocol(self, idp_id, protocol_id):
+        """Get an IdP-Protocol configuration.
+
+        :raises: keystone.exception.IdentityProviderNotFound,
+                 keystone.exception.FederatedProtocolNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_protocols(self, idp_id):
+        """List an IdP's supported protocols.
+
+        :raises: keystone.exception.IdentityProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_protocol(self, idp_id, protocol_id):
+        """Delete an IdP-Protocol configuration.
+
+        :raises: keystone.exception.IdentityProviderNotFound,
+                 keystone.exception.FederatedProtocolNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_mapping(self, mapping_id, mapping_ref):
+        """Create a mapping.
+
+        :param mapping_id: id of the mapping
+        :type mapping_id: string
+        :param mapping_ref: mapping ref with mapping name
+        :type mapping_ref: dict
+        :returns: mapping_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_mapping(self, mapping_id):
+        """Delete a mapping.
+
+        :param mapping_id: id of mapping to delete
+        :type mapping_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_mapping(self, mapping_id, mapping_ref):
+        """Update a mapping.
+
+        :param mapping_id: id of mapping to update
+        :type mapping_id: string
+        :param mapping_ref: new mapping ref
+        :type mapping_ref: dict
+        :returns: mapping_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_mappings(self):
+        """List all mappings.
+
+        :returns: list of mappings
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_mapping(self, mapping_id):
+        """Get a mapping, returns the mapping based
+        on mapping_id.
+
+        :param mapping_id: id of mapping to get
+        :type mapping_ref: string
+        :returns: mapping_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
+        """Get mapping based on idp_id and protocol_id.
+
+        :param idp_id: id of the identity provider
+        :type idp_id: string
+        :param protocol_id: id of the protocol
+        :type protocol_id: string
+        :raises: keystone.exception.IdentityProviderNotFound,
+                 keystone.exception.FederatedProtocolNotFound
+        :returns: mapping_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_sp(self, sp_id, sp):
+        """Create a service provider.
+
+        :param sp_id: id of the service provider
+        :type sp_id: string
+        :param sp: service provider object
+        :type sp: dict
+
+        :returns: sp_ref
+        :rtype: dict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_sp(self, sp_id):
+        """Delete a service provider.
+
+        :param sp_id: id of the service provider
+        :type sp_id: string
+
+        :raises: keystone.exception.ServiceProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_sps(self):
+        """List all service providers.
+
+        :returns: list of sp_ref objects
+        :rtype: list of dicts
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_sp(self, sp_id):
+        """Get a service provider.
+
+        :param sp_id: id of the service provider
+        :type sp_id: string
+
+        :returns: sp_ref
+        :raises: keystone.exception.ServiceProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_sp(self, sp_id, sp):
+        """Update a service provider.
+
+        :param sp_id: id of the service provider
+        :type sp_id: string
+        :param sp: service provider object
+        :type sp: dict
+
+        :returns: sp_ref
+        :rtype: dict
+
+        :raises: keystone.exception.ServiceProviderNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_enabled_service_providers(self):
+        """List enabled service providers for Service Catalog
+
+        Service Provider in a catalog contains three attributes: ``id``,
+        ``auth_url``, ``sp_url``, where:
+
+        - id is an unique, user defined identifier for service provider object
+        - auth_url is a authentication URL of remote Keystone
+        - sp_url a URL accessible at the remote service provider where SAML
+          assertion is transmitted.
+
+        :returns: list of dictionaries with enabled service providers
+        :rtype: list of dicts
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/contrib/federation/idp.py b/keystone-moon/keystone/contrib/federation/idp.py
new file mode 100644 (file)
index 0000000..bf40013
--- /dev/null
@@ -0,0 +1,558 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import os
+import subprocess
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import saml2
+from saml2 import md
+from saml2 import saml
+from saml2 import samlp
+from saml2 import sigver
+import xmldsig
+
+from keystone import exception
+from keystone.i18n import _, _LE
+from keystone.openstack.common import fileutils
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class SAMLGenerator(object):
+    """A class to generate SAML assertions."""
+
+    def __init__(self):
+        self.assertion_id = uuid.uuid4().hex
+
+    def samlize_token(self, issuer, recipient, user, roles, project,
+                      expires_in=None):
+        """Convert Keystone attributes to a SAML assertion.
+
+        :param issuer: URL of the issuing party
+        :type issuer: string
+        :param recipient: URL of the recipient
+        :type recipient: string
+        :param user: User name
+        :type user: string
+        :param roles: List of role names
+        :type roles: list
+        :param project: Project name
+        :type project: string
+        :param expires_in: Sets how long the assertion is valid for, in seconds
+        :type expires_in: int
+
+        :return: XML <Response> object
+
+        """
+        expiration_time = self._determine_expiration_time(expires_in)
+        status = self._create_status()
+        saml_issuer = self._create_issuer(issuer)
+        subject = self._create_subject(user, expiration_time, recipient)
+        attribute_statement = self._create_attribute_statement(user, roles,
+                                                               project)
+        authn_statement = self._create_authn_statement(issuer, expiration_time)
+        signature = self._create_signature()
+
+        assertion = self._create_assertion(saml_issuer, signature,
+                                           subject, authn_statement,
+                                           attribute_statement)
+
+        assertion = _sign_assertion(assertion)
+
+        response = self._create_response(saml_issuer, status, assertion,
+                                         recipient)
+        return response
+
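+    # Illustrative usage sketch (all values assumed; requires CONF.saml and
+    # the signing setup used by _sign_assertion to be configured):
+    #
+    #     generator = SAMLGenerator()
+    #     response = generator.samlize_token(
+    #         'https://idp.test/issuer', 'https://sp.test/SAML2/POST',
+    #         'alice', ['admin', 'member'], 'demo-project')
+    #     xml_document = response.to_string()
+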
+    def _determine_expiration_time(self, expires_in):
+        if expires_in is None:
+            expires_in = CONF.saml.assertion_expiration_time
+        now = timeutils.utcnow()
+        future = now + datetime.timedelta(seconds=expires_in)
+        return timeutils.isotime(future, subsecond=True)
+
+    def _create_status(self):
+        """Create an object that represents a SAML Status.
+
+        <ns0:Status xmlns:ns0="urn:oasis:names:tc:SAML:2.0:protocol">
+            <ns0:StatusCode
+              Value="urn:oasis:names:tc:SAML:2.0:status:Success" />
+        </ns0:Status>
+
+        :return: XML <Status> object
+
+        """
+        status = samlp.Status()
+        status_code = samlp.StatusCode()
+        status_code.value = samlp.STATUS_SUCCESS
+        status_code.set_text('')
+        status.status_code = status_code
+        return status
+
+    def _create_issuer(self, issuer_url):
+        """Create an object that represents a SAML Issuer.
+
+        <ns0:Issuer
+          xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion"
+          Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
+          https://acme.com/FIM/sps/openstack/saml20</ns0:Issuer>
+
+        :return: XML <Issuer> object
+
+        """
+        issuer = saml.Issuer()
+        issuer.format = saml.NAMEID_FORMAT_ENTITY
+        issuer.set_text(issuer_url)
+        return issuer
+
+    def _create_subject(self, user, expiration_time, recipient):
+        """Create an object that represents a SAML Subject.
+
+        <ns0:Subject>
+            <ns0:NameID>
+                john@smith.com</ns0:NameID>
+            <ns0:SubjectConfirmation
+              Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
+                <ns0:SubjectConfirmationData
+                  NotOnOrAfter="2014-08-19T11:53:57.243106Z"
+                  Recipient="http://beta.com/Shibboleth.sso/SAML2/POST" />
+            </ns0:SubjectConfirmation>
+        </ns0:Subject>
+
+        :return: XML <Subject> object
+
+        """
+        name_id = saml.NameID()
+        name_id.set_text(user)
+        subject_conf_data = saml.SubjectConfirmationData()
+        subject_conf_data.recipient = recipient
+        subject_conf_data.not_on_or_after = expiration_time
+        subject_conf = saml.SubjectConfirmation()
+        subject_conf.method = saml.SCM_BEARER
+        subject_conf.subject_confirmation_data = subject_conf_data
+        subject = saml.Subject()
+        subject.subject_confirmation = subject_conf
+        subject.name_id = name_id
+        return subject
+
+    def _create_attribute_statement(self, user, roles, project):
+        """Create an object that represents a SAML AttributeStatement.
+
+        <ns0:AttributeStatement
+          xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+            <ns0:Attribute Name="openstack_user">
+                <ns0:AttributeValue
+                  xsi:type="xs:string">test_user</ns0:AttributeValue>
+            </ns0:Attribute>
+            <ns0:Attribute Name="openstack_roles">
+                <ns0:AttributeValue
+                  xsi:type="xs:string">admin</ns0:AttributeValue>
+                <ns0:AttributeValue
+                  xsi:type="xs:string">member</ns0:AttributeValue>
+            </ns0:Attribute>
+            <ns0:Attribute Name="openstack_projects">
+                <ns0:AttributeValue
+                  xsi:type="xs:string">development</ns0:AttributeValue>
+            </ns0:Attribute>
+        </ns0:AttributeStatement>
+
+        :return: XML <AttributeStatement> object
+
+        """
+        openstack_user = 'openstack_user'
+        user_attribute = saml.Attribute()
+        user_attribute.name = openstack_user
+        user_value = saml.AttributeValue()
+        user_value.set_text(user)
+        user_attribute.attribute_value = user_value
+
+        openstack_roles = 'openstack_roles'
+        roles_attribute = saml.Attribute()
+        roles_attribute.name = openstack_roles
+
+        for role in roles:
+            role_value = saml.AttributeValue()
+            role_value.set_text(role)
+            roles_attribute.attribute_value.append(role_value)
+
+        openstack_project = 'openstack_project'
+        project_attribute = saml.Attribute()
+        project_attribute.name = openstack_project
+        project_value = saml.AttributeValue()
+        project_value.set_text(project)
+        project_attribute.attribute_value = project_value
+
+        attribute_statement = saml.AttributeStatement()
+        attribute_statement.attribute.append(user_attribute)
+        attribute_statement.attribute.append(roles_attribute)
+        attribute_statement.attribute.append(project_attribute)
+        return attribute_statement
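+
+    # Illustrative example (``generator`` stands in for an instance of this
+    # class; values are hypothetical): the XML shown in the docstring above
+    # corresponds to
+    #
+    #     statement = generator._create_attribute_statement(
+    #         'test_user', ['admin', 'member'], 'development')
+    #     xml = statement.to_string()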
+
+    def _create_authn_statement(self, issuer, expiration_time):
+        """Create an object that represents a SAML AuthnStatement.
+
+        <ns0:AuthnStatement xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion"
+          AuthnInstant="2014-07-30T03:04:25Z" SessionIndex="47335964efb"
+          SessionNotOnOrAfter="2014-07-30T03:04:26Z">
+            <ns0:AuthnContext>
+                <ns0:AuthnContextClassRef>
+                  urn:oasis:names:tc:SAML:2.0:ac:classes:Password
+                </ns0:AuthnContextClassRef>
+                <ns0:AuthenticatingAuthority>
+                  https://acme.com/FIM/sps/openstack/saml20
+                </ns0:AuthenticatingAuthority>
+            </ns0:AuthnContext>
+        </ns0:AuthnStatement>
+
+        :return: XML <AuthnStatement> object
+
+        """
+        authn_statement = saml.AuthnStatement()
+        authn_statement.authn_instant = timeutils.isotime()
+        authn_statement.session_index = uuid.uuid4().hex
+        authn_statement.session_not_on_or_after = expiration_time
+
+        authn_context = saml.AuthnContext()
+        authn_context_class = saml.AuthnContextClassRef()
+        authn_context_class.set_text(saml.AUTHN_PASSWORD)
+
+        authn_authority = saml.AuthenticatingAuthority()
+        authn_authority.set_text(issuer)
+        authn_context.authn_context_class_ref = authn_context_class
+        authn_context.authenticating_authority = authn_authority
+
+        authn_statement.authn_context = authn_context
+
+        return authn_statement
+
+    def _create_assertion(self, issuer, signature, subject, authn_statement,
+                          attribute_statement):
+        """Create an object that represents a SAML Assertion.
+
+        <ns0:Assertion
+          ID="35daed258ba647ba8962e9baff4d6a46"
+          IssueInstant="2014-06-11T15:45:58Z"
+          Version="2.0">
+            <ns0:Issuer> ... </ns0:Issuer>
+            <ns1:Signature> ... </ns1:Signature>
+            <ns0:Subject> ... </ns0:Subject>
+            <ns0:AuthnStatement> ... </ns0:AuthnStatement>
+            <ns0:AttributeStatement> ... </ns0:AttributeStatement>
+        </ns0:Assertion>
+
+        :return: XML <Assertion> object
+
+        """
+        assertion = saml.Assertion()
+        assertion.id = self.assertion_id
+        assertion.issue_instant = timeutils.isotime()
+        assertion.version = '2.0'
+        assertion.issuer = issuer
+        assertion.signature = signature
+        assertion.subject = subject
+        assertion.authn_statement = authn_statement
+        assertion.attribute_statement = attribute_statement
+        return assertion
+
+    def _create_response(self, issuer, status, assertion, recipient):
+        """Create an object that represents a SAML Response.
+
+        <ns0:Response
+          Destination="http://beta.com/Shibboleth.sso/SAML2/POST"
+          ID="c5954543230e4e778bc5b92923a0512d"
+          IssueInstant="2014-07-30T03:19:45Z"
+          Version="2.0" />
+            <ns0:Issuer> ... </ns0:Issuer>
+            <ns0:Assertion> ... </ns0:Assertion>
+            <ns0:Status> ... </ns0:Status>
+        </ns0:Response>
+
+        :return: XML <Response> object
+
+        """
+        response = samlp.Response()
+        response.id = uuid.uuid4().hex
+        response.destination = recipient
+        response.issue_instant = timeutils.isotime()
+        response.version = '2.0'
+        response.issuer = issuer
+        response.status = status
+        response.assertion = assertion
+        return response
+
+    def _create_signature(self):
+        """Create an object that represents a SAML <Signature>.
+
+        This must be filled with algorithms that the signing binary will apply
+        in order to sign the whole message.
+        Currently we enforce X509 signing.
+        Example of the template::
+
+        <Signature xmlns="http://www.w3.org/2000/09/xmldsig#">
+          <SignedInfo>
+            <CanonicalizationMethod
+              Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
+            <SignatureMethod
+              Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
+            <Reference URI="#<Assertion ID>">
+              <Transforms>
+                <Transform
+            Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
+               <Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
+              </Transforms>
+             <DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
+             <DigestValue />
+            </Reference>
+          </SignedInfo>
+          <SignatureValue />
+          <KeyInfo>
+            <X509Data />
+          </KeyInfo>
+        </Signature>
+
+        :return: XML <Signature> object
+
+        """
+        canonicalization_method = xmldsig.CanonicalizationMethod()
+        canonicalization_method.algorithm = xmldsig.ALG_EXC_C14N
+        signature_method = xmldsig.SignatureMethod(
+            algorithm=xmldsig.SIG_RSA_SHA1)
+
+        transforms = xmldsig.Transforms()
+        envelope_transform = xmldsig.Transform(
+            algorithm=xmldsig.TRANSFORM_ENVELOPED)
+
+        c14_transform = xmldsig.Transform(algorithm=xmldsig.ALG_EXC_C14N)
+        transforms.transform = [envelope_transform, c14_transform]
+
+        digest_method = xmldsig.DigestMethod(algorithm=xmldsig.DIGEST_SHA1)
+        digest_value = xmldsig.DigestValue()
+
+        reference = xmldsig.Reference()
+        reference.uri = '#' + self.assertion_id
+        reference.digest_method = digest_method
+        reference.digest_value = digest_value
+        reference.transforms = transforms
+
+        signed_info = xmldsig.SignedInfo()
+        signed_info.canonicalization_method = canonicalization_method
+        signed_info.signature_method = signature_method
+        signed_info.reference = reference
+
+        key_info = xmldsig.KeyInfo()
+        key_info.x509_data = xmldsig.X509Data()
+
+        signature = xmldsig.Signature()
+        signature.signed_info = signed_info
+        signature.signature_value = xmldsig.SignatureValue()
+        signature.key_info = key_info
+
+        return signature
+
+
+def _sign_assertion(assertion):
+    """Sign a SAML assertion.
+
+    This method utilizes ``xmlsec1`` binary and signs SAML assertions in a
+    separate process. ``xmlsec1`` cannot read input data from stdin so the
+    prepared assertion needs to be serialized and stored in a temporary
+    file. This file will be deleted immediately after ``xmlsec1`` returns.
+    The signed assertion is redirected to a standard output and read using
+    subprocess.PIPE redirection. A ``saml.Assertion`` class is created
+    from the signed string again and returned.
+
+    Parameters that are required in the CONF:
+
+    * xmlsec_binary
+    * private key file path
+    * public key file path
+
+    :return: XML <Assertion> object
+
+    """
+    xmlsec_binary = CONF.saml.xmlsec1_binary
+    idp_private_key = CONF.saml.keyfile
+    idp_public_key = CONF.saml.certfile
+
+    # xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID <tag> <file>
+    certificates = '%(idp_private_key)s,%(idp_public_key)s' % {
+        'idp_public_key': idp_public_key,
+        'idp_private_key': idp_private_key
+    }
+
+    command_list = [xmlsec_binary, '--sign', '--privkey-pem', certificates,
+                    '--id-attr:ID', 'Assertion']
+
+    try:
+        # NOTE(gyee): need to make the namespace prefixes explicit so
+        # they won't get reassigned when we wrap the assertion into
+        # SAML2 response
+        file_path = fileutils.write_to_tempfile(assertion.to_string(
+            nspair={'saml': saml2.NAMESPACE,
+                    'xmldsig': xmldsig.NAMESPACE}))
+        command_list.append(file_path)
+        stdout = subprocess.check_output(command_list)
+    except Exception as e:
+        msg = _LE('Error when signing assertion, reason: %(reason)s')
+        msg = msg % {'reason': e}
+        LOG.error(msg)
+        raise exception.SAMLSigningError(reason=e)
+    finally:
+        try:
+            os.remove(file_path)
+        except OSError:
+            pass
+
+    return saml2.create_class_from_xml_string(saml.Assertion, stdout)
+
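+
+# A rough manual equivalent of the signing step above (paths are
+# illustrative, not configured defaults):
+#
+#     xmlsec1 --sign \
+#         --privkey-pem /etc/keystone/ssl/private/signing_key.pem,/etc/keystone/ssl/certs/signing_cert.pem \
+#         --id-attr:ID Assertion /tmp/assertion.xml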
+
+class MetadataGenerator(object):
+    """A class for generating SAML IdP Metadata."""
+
+    def generate_metadata(self):
+        """Generate Identity Provider Metadata.
+
+        Generate and format metadata into XML that can be exposed and
+        consumed by a federated Service Provider.
+
+        :return: XML <EntityDescriptor> object.
+        :raises keystone.exception.ValidationError: if the required
+            config options aren't set.
+
+        """
+        self._ensure_required_values_present()
+        entity_descriptor = self._create_entity_descriptor()
+        entity_descriptor.idpsso_descriptor = (
+            self._create_idp_sso_descriptor())
+        return entity_descriptor
+
+    def _create_entity_descriptor(self):
+        ed = md.EntityDescriptor()
+        ed.entity_id = CONF.saml.idp_entity_id
+        return ed
+
+    def _create_idp_sso_descriptor(self):
+
+        def get_cert():
+            try:
+                return sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
+            except (IOError, sigver.CertificateError) as e:
+                msg = _('Cannot open certificate %(cert_file)s. '
+                        'Reason: %(reason)s')
+                msg = msg % {'cert_file': CONF.saml.certfile, 'reason': e}
+                LOG.error(msg)
+                raise IOError(msg)
+
+        def key_descriptor():
+            cert = get_cert()
+            return md.KeyDescriptor(
+                key_info=xmldsig.KeyInfo(
+                    x509_data=xmldsig.X509Data(
+                        x509_certificate=xmldsig.X509Certificate(text=cert)
+                    )
+                ), use='signing'
+            )
+
+        def single_sign_on_service():
+            idp_sso_endpoint = CONF.saml.idp_sso_endpoint
+            return md.SingleSignOnService(
+                binding=saml2.BINDING_URI,
+                location=idp_sso_endpoint)
+
+        def organization():
+            name = md.OrganizationName(lang=CONF.saml.idp_lang,
+                                       text=CONF.saml.idp_organization_name)
+            display_name = md.OrganizationDisplayName(
+                lang=CONF.saml.idp_lang,
+                text=CONF.saml.idp_organization_display_name)
+            url = md.OrganizationURL(lang=CONF.saml.idp_lang,
+                                     text=CONF.saml.idp_organization_url)
+
+            return md.Organization(
+                organization_display_name=display_name,
+                organization_url=url, organization_name=name)
+
+        def contact_person():
+            company = md.Company(text=CONF.saml.idp_contact_company)
+            given_name = md.GivenName(text=CONF.saml.idp_contact_name)
+            surname = md.SurName(text=CONF.saml.idp_contact_surname)
+            email = md.EmailAddress(text=CONF.saml.idp_contact_email)
+            telephone = md.TelephoneNumber(
+                text=CONF.saml.idp_contact_telephone)
+            contact_type = CONF.saml.idp_contact_type
+
+            return md.ContactPerson(
+                company=company, given_name=given_name, sur_name=surname,
+                email_address=email, telephone_number=telephone,
+                contact_type=contact_type)
+
+        def name_id_format():
+            return md.NameIDFormat(text=saml.NAMEID_FORMAT_TRANSIENT)
+
+        idpsso = md.IDPSSODescriptor()
+        idpsso.protocol_support_enumeration = samlp.NAMESPACE
+        idpsso.key_descriptor = key_descriptor()
+        idpsso.single_sign_on_service = single_sign_on_service()
+        idpsso.name_id_format = name_id_format()
+        if self._check_organization_values():
+            idpsso.organization = organization()
+        if self._check_contact_person_values():
+            idpsso.contact_person = contact_person()
+        return idpsso
+
+    def _ensure_required_values_present(self):
+        """Ensure idp_sso_endpoint and idp_entity_id have values."""
+
+        if CONF.saml.idp_entity_id is None:
+            msg = _('Ensure configuration option idp_entity_id is set.')
+            raise exception.ValidationError(msg)
+        if CONF.saml.idp_sso_endpoint is None:
+            msg = _('Ensure configuration option idp_sso_endpoint is set.')
+            raise exception.ValidationError(msg)
+
+    def _check_contact_person_values(self):
+        """Determine if contact information is included in metadata."""
+
+        # Check if we should include contact information
+        params = [CONF.saml.idp_contact_company,
+                  CONF.saml.idp_contact_name,
+                  CONF.saml.idp_contact_surname,
+                  CONF.saml.idp_contact_email,
+                  CONF.saml.idp_contact_telephone]
+        for value in params:
+            if value is None:
+                return False
+
+        # Check if contact type is an invalid value
+        valid_type_values = ['technical', 'other', 'support', 'administrative',
+                             'billing']
+        if CONF.saml.idp_contact_type not in valid_type_values:
+            msg = _('idp_contact_type must be one of: [technical, other, '
+                    'support, administrative or billing].')
+            raise exception.ValidationError(msg)
+        return True
+
+    def _check_organization_values(self):
+        """Determine if organization information is included in metadata."""
+
+        params = [CONF.saml.idp_organization_name,
+                  CONF.saml.idp_organization_display_name,
+                  CONF.saml.idp_organization_url]
+        for value in params:
+            if value is None:
+                return False
+        return True
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/__init__.py b/keystone-moon/keystone/contrib/federation/migrate_repo/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/migrate.cfg b/keystone-moon/keystone/contrib/federation/migrate_repo/migrate.cfg
new file mode 100644 (file)
index 0000000..464ab62
--- /dev/null
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=federation
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
new file mode 100644 (file)
index 0000000..cfb6f2c
--- /dev/null
@@ -0,0 +1,51 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
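+# These versioned scripts are applied through sqlalchemy-migrate; in a
+# deployment of this era the usual entry point is (sketch):
+#
+#     keystone-manage db_sync --extension federation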
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    idp_table = sql.Table(
+        'identity_provider',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('enabled', sql.Boolean, nullable=False),
+        sql.Column('description', sql.Text(), nullable=True),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    idp_table.create(migrate_engine, checkfirst=True)
+
+    federation_protocol_table = sql.Table(
+        'federation_protocol',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('idp_id', sql.String(64),
+                   sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
+                   primary_key=True),
+        sql.Column('mapping_id', sql.String(64), nullable=True),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    federation_protocol_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    tables = ['federation_protocol', 'identity_provider']
+    for table_name in tables:
+        table = sql.Table(table_name, meta, autoload=True)
+        table.drop()
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
new file mode 100644 (file)
index 0000000..f827f9a
--- /dev/null
@@ -0,0 +1,37 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    mapping_table = sql.Table(
+        'mapping',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('rules', sql.Text(), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    mapping_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    # Drop previously created tables
+    tables = ['mapping']
+    for table_name in tables:
+        table = sql.Table(table_name, meta, autoload=True)
+        table.drop()
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py
new file mode 100644 (file)
index 0000000..eb8b237
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sa
+
+
+def upgrade(migrate_engine):
+    meta = sa.MetaData(bind=migrate_engine)
+    federation_protocol = sa.Table('federation_protocol', meta, autoload=True)
+    # NOTE(i159): The column is changed to non-nullable. To prevent
+    # database errors when the column will be altered, all the existing
+    # null-records should be filled with not null values.
+    stmt = (federation_protocol.update().
+            where(federation_protocol.c.mapping_id.is_(None)).
+            values(mapping_id=''))
+    migrate_engine.execute(stmt)
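+    # Roughly equivalent SQL (sketch):
+    #     UPDATE federation_protocol SET mapping_id = ''
+    #     WHERE mapping_id IS NULL;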
+    federation_protocol.c.mapping_id.alter(nullable=False)
+
+
+def downgrade(migrate_engine):
+    meta = sa.MetaData(bind=migrate_engine)
+    federation_protocol = sa.Table('federation_protocol', meta, autoload=True)
+    federation_protocol.c.mapping_id.alter(nullable=True)
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py
new file mode 100644 (file)
index 0000000..dbe5d1f
--- /dev/null
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_db.sqlalchemy import utils
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    idp_table = utils.get_table(migrate_engine, 'identity_provider')
+    remote_id = sql.Column('remote_id', sql.String(256), nullable=True)
+    idp_table.create_column(remote_id)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    idp_table = utils.get_table(migrate_engine, 'identity_provider')
+    idp_table.drop_column('remote_id')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py
new file mode 100644 (file)
index 0000000..bff6a25
--- /dev/null
@@ -0,0 +1,38 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    sp_table = sql.Table(
+        'service_provider',
+        meta,
+        sql.Column('auth_url', sql.String(256), nullable=True),
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('enabled', sql.Boolean, nullable=False),
+        sql.Column('description', sql.Text(), nullable=True),
+        sql.Column('sp_url', sql.String(256), nullable=True),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+
+    sp_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    table = sql.Table('service_provider', meta, autoload=True)
+    table.drop()
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py
new file mode 100644 (file)
index 0000000..8a42ce3
--- /dev/null
@@ -0,0 +1,48 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+_SP_TABLE_NAME = 'service_provider'
+
+
+def _update_null_columns(migrate_engine, sp_table):
+    stmt = (sp_table.update().
+            where(sp_table.c.auth_url.is_(None)).
+            values(auth_url=''))
+    migrate_engine.execute(stmt)
+
+    stmt = (sp_table.update().
+            where(sp_table.c.sp_url.is_(None)).
+            values(sp_url=''))
+    migrate_engine.execute(stmt)
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    sp_table = sql.Table(_SP_TABLE_NAME, meta, autoload=True)
+    # The columns are being changed to non-nullable. To prevent
+    # database errors when both are altered, all the existing
+    # null-records should be filled with not null values.
+    _update_null_columns(migrate_engine, sp_table)
+
+    sp_table.c.auth_url.alter(nullable=False)
+    sp_table.c.sp_url.alter(nullable=False)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    sp_table = sql.Table(_SP_TABLE_NAME, meta, autoload=True)
+    sp_table.c.auth_url.alter(nullable=True)
+    sp_table.c.sp_url.alter(nullable=True)
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/__init__.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/federation/routers.py b/keystone-moon/keystone/contrib/federation/routers.py
new file mode 100644 (file)
index 0000000..9a6224b
--- /dev/null
@@ -0,0 +1,226 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.contrib.federation import controllers
+
+
+build_resource_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-FEDERATION', extension_version='1.0')
+
+build_parameter_relation = functools.partial(
+    json_home.build_v3_extension_parameter_relation,
+    extension_name='OS-FEDERATION', extension_version='1.0')
+
+IDP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='idp_id')
+PROTOCOL_ID_PARAMETER_RELATION = build_parameter_relation(
+    parameter_name='protocol_id')
+SP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='sp_id')
+
+
+class FederationExtension(wsgi.V3ExtensionRouter):
+    """API Endpoints for the Federation extension.
+
+    The API looks like::
+
+        PUT /OS-FEDERATION/identity_providers/$identity_provider
+        GET /OS-FEDERATION/identity_providers
+        GET /OS-FEDERATION/identity_providers/$identity_provider
+        DELETE /OS-FEDERATION/identity_providers/$identity_provider
+        PATCH /OS-FEDERATION/identity_providers/$identity_provider
+
+        PUT /OS-FEDERATION/identity_providers/
+            $identity_provider/protocols/$protocol
+        GET /OS-FEDERATION/identity_providers/
+            $identity_provider/protocols
+        GET /OS-FEDERATION/identity_providers/
+            $identity_provider/protocols/$protocol
+        PATCH /OS-FEDERATION/identity_providers/
+            $identity_provider/protocols/$protocol
+        DELETE /OS-FEDERATION/identity_providers/
+            $identity_provider/protocols/$protocol
+
+        PUT /OS-FEDERATION/mappings
+        GET /OS-FEDERATION/mappings
+        PATCH /OS-FEDERATION/mappings/$mapping_id
+        GET /OS-FEDERATION/mappings/$mapping_id
+        DELETE /OS-FEDERATION/mappings/$mapping_id
+
+        GET /OS-FEDERATION/projects
+        GET /OS-FEDERATION/domains
+
+        PUT /OS-FEDERATION/service_providers/$service_provider
+        GET /OS-FEDERATION/service_providers
+        GET /OS-FEDERATION/service_providers/$service_provider
+        DELETE /OS-FEDERATION/service_providers/$service_provider
+        PATCH /OS-FEDERATION/service_providers/$service_provider
+
+        GET /OS-FEDERATION/identity_providers/$identity_provider/
+            protocols/$protocol/auth
+        POST /OS-FEDERATION/identity_providers/$identity_provider/
+            protocols/$protocol/auth
+
+        POST /auth/OS-FEDERATION/saml2
+        GET /OS-FEDERATION/saml2/metadata
+
+        GET /auth/OS-FEDERATION/websso/{protocol_id}
+            ?origin=https%3A//horizon.example.com
+
+        POST /auth/OS-FEDERATION/websso/{protocol_id}
+             ?origin=https%3A//horizon.example.com
+
+    """
+    def _construct_url(self, suffix):
+        return "/OS-FEDERATION/%s" % suffix
+
+    def add_routes(self, mapper):
+        auth_controller = controllers.Auth()
+        idp_controller = controllers.IdentityProvider()
+        protocol_controller = controllers.FederationProtocol()
+        mapping_controller = controllers.MappingController()
+        project_controller = controllers.ProjectAssignmentV3()
+        domain_controller = controllers.DomainV3()
+        saml_metadata_controller = controllers.SAMLMetadataV3()
+        sp_controller = controllers.ServiceProvider()
+
+        # Identity Provider CRUD operations
+
+        self._add_resource(
+            mapper, idp_controller,
+            path=self._construct_url('identity_providers/{idp_id}'),
+            get_action='get_identity_provider',
+            put_action='create_identity_provider',
+            patch_action='update_identity_provider',
+            delete_action='delete_identity_provider',
+            rel=build_resource_relation(resource_name='identity_provider'),
+            path_vars={
+                'idp_id': IDP_ID_PARAMETER_RELATION,
+            })
+        self._add_resource(
+            mapper, idp_controller,
+            path=self._construct_url('identity_providers'),
+            get_action='list_identity_providers',
+            rel=build_resource_relation(resource_name='identity_providers'))
+
+        # Protocol CRUD operations
+
+        self._add_resource(
+            mapper, protocol_controller,
+            path=self._construct_url('identity_providers/{idp_id}/protocols/'
+                                     '{protocol_id}'),
+            get_action='get_protocol',
+            put_action='create_protocol',
+            patch_action='update_protocol',
+            delete_action='delete_protocol',
+            rel=build_resource_relation(
+                resource_name='identity_provider_protocol'),
+            path_vars={
+                'idp_id': IDP_ID_PARAMETER_RELATION,
+                'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
+            })
+        self._add_resource(
+            mapper, protocol_controller,
+            path=self._construct_url('identity_providers/{idp_id}/protocols'),
+            get_action='list_protocols',
+            rel=build_resource_relation(
+                resource_name='identity_provider_protocols'),
+            path_vars={
+                'idp_id': IDP_ID_PARAMETER_RELATION,
+            })
+
+        # Mapping CRUD operations
+
+        self._add_resource(
+            mapper, mapping_controller,
+            path=self._construct_url('mappings/{mapping_id}'),
+            get_action='get_mapping',
+            put_action='create_mapping',
+            patch_action='update_mapping',
+            delete_action='delete_mapping',
+            rel=build_resource_relation(resource_name='mapping'),
+            path_vars={
+                'mapping_id': build_parameter_relation(
+                    parameter_name='mapping_id'),
+            })
+        self._add_resource(
+            mapper, mapping_controller,
+            path=self._construct_url('mappings'),
+            get_action='list_mappings',
+            rel=build_resource_relation(resource_name='mappings'))
+
+        # Service Providers CRUD operations
+
+        self._add_resource(
+            mapper, sp_controller,
+            path=self._construct_url('service_providers/{sp_id}'),
+            get_action='get_service_provider',
+            put_action='create_service_provider',
+            patch_action='update_service_provider',
+            delete_action='delete_service_provider',
+            rel=build_resource_relation(resource_name='service_provider'),
+            path_vars={
+                'sp_id': SP_ID_PARAMETER_RELATION,
+            })
+
+        self._add_resource(
+            mapper, sp_controller,
+            path=self._construct_url('service_providers'),
+            get_action='list_service_providers',
+            rel=build_resource_relation(resource_name='service_providers'))
+
+        self._add_resource(
+            mapper, domain_controller,
+            path=self._construct_url('domains'),
+            get_action='list_domains_for_groups',
+            rel=build_resource_relation(resource_name='domains'))
+        self._add_resource(
+            mapper, project_controller,
+            path=self._construct_url('projects'),
+            get_action='list_projects_for_groups',
+            rel=build_resource_relation(resource_name='projects'))
+        self._add_resource(
+            mapper, auth_controller,
+            path=self._construct_url('identity_providers/{identity_provider}/'
+                                     'protocols/{protocol}/auth'),
+            get_post_action='federated_authentication',
+            rel=build_resource_relation(
+                resource_name='identity_provider_protocol_auth'),
+            path_vars={
+                'identity_provider': IDP_ID_PARAMETER_RELATION,
+                'protocol': PROTOCOL_ID_PARAMETER_RELATION,
+            })
+
+        # Auth operations
+        self._add_resource(
+            mapper, auth_controller,
+            path='/auth' + self._construct_url('saml2'),
+            post_action='create_saml_assertion',
+            rel=build_resource_relation(resource_name='saml2'))
+        self._add_resource(
+            mapper, auth_controller,
+            path='/auth' + self._construct_url('websso/{protocol_id}'),
+            get_post_action='federated_sso_auth',
+            rel=build_resource_relation(resource_name='websso'),
+            path_vars={
+                'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
+            })
+
+        # Keystone-Identity-Provider metadata endpoint
+        self._add_resource(
+            mapper, saml_metadata_controller,
+            path=self._construct_url('saml2/metadata'),
+            get_action='get_metadata',
+            rel=build_resource_relation(resource_name='metadata'))
diff --git a/keystone-moon/keystone/contrib/federation/schema.py b/keystone-moon/keystone/contrib/federation/schema.py
new file mode 100644 (file)
index 0000000..645e112
--- /dev/null
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+
+
+basic_property_id = {
+    'type': 'object',
+    'properties': {
+        'id': {
+            'type': 'string'
+        }
+    },
+    'required': ['id'],
+    'additionalProperties': False
+}
+
+saml_create = {
+    'type': 'object',
+    'properties': {
+        'identity': {
+            'type': 'object',
+            'properties': {
+                'token': basic_property_id,
+                'methods': {
+                    'type': 'array'
+                }
+            },
+            'required': ['token'],
+            'additionalProperties': False
+        },
+        'scope': {
+            'type': 'object',
+            'properties': {
+                'service_provider': basic_property_id
+            },
+            'required': ['service_provider'],
+            'additionalProperties': False
+        },
+    },
+    'required': ['identity', 'scope'],
+    'additionalProperties': False
+}
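+
+# An illustrative ``auth`` body that validates against ``saml_create``
+# (token and service provider ids are placeholders):
+#
+#     {
+#         "identity": {
+#             "methods": ["token"],
+#             "token": {"id": "<token-id>"}
+#         },
+#         "scope": {
+#             "service_provider": {"id": "SP1"}
+#         }
+#     }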
+
+_service_provider_properties = {
+    # NOTE(rodrigods): The database accepts URLs with 256 as max length,
+    # but parameter_types.url uses 225 as max length.
+    'auth_url': parameter_types.url,
+    'sp_url': parameter_types.url,
+    'description': validation.nullable(parameter_types.description),
+    'enabled': parameter_types.boolean
+}
+
+service_provider_create = {
+    'type': 'object',
+    'properties': _service_provider_properties,
+    # NOTE(rodrigods): 'id' is not required since it is passed in the URL
+    'required': ['auth_url', 'sp_url'],
+    'additionalProperties': False
+}
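+
+# An illustrative body accepted by ``service_provider_create`` (URLs are
+# placeholders):
+#
+#     {
+#         "auth_url": "https://sp.example.com/v3/OS-FEDERATION/identity_providers/acme/protocols/saml2/auth",
+#         "sp_url": "https://sp.example.com/Shibboleth.sso/SAML2/ECP",
+#         "enabled": true
+#     }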
+
+service_provider_update = {
+    'type': 'object',
+    'properties': _service_provider_properties,
+    # Make sure at least one property is being updated
+    'minProperties': 1,
+    'additionalProperties': False
+}
diff --git a/keystone-moon/keystone/contrib/federation/utils.py b/keystone-moon/keystone/contrib/federation/utils.py
new file mode 100644 (file)
index 0000000..939fe9a
--- /dev/null
@@ -0,0 +1,763 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities for Federation Extension."""
+
+import ast
+import re
+
+import jsonschema
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.contrib import federation
+from keystone import exception
+from keystone.i18n import _, _LW
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+MAPPING_SCHEMA = {
+    "type": "object",
+    "required": ['rules'],
+    "properties": {
+        "rules": {
+            "minItems": 1,
+            "type": "array",
+            "items": {
+                "type": "object",
+                "required": ['local', 'remote'],
+                "additionalProperties": False,
+                "properties": {
+                    "local": {
+                        "type": "array"
+                    },
+                    "remote": {
+                        "minItems": 1,
+                        "type": "array",
+                        "items": {
+                            "type": "object",
+                            "oneOf": [
+                                {"$ref": "#/definitions/empty"},
+                                {"$ref": "#/definitions/any_one_of"},
+                                {"$ref": "#/definitions/not_any_of"},
+                                {"$ref": "#/definitions/blacklist"},
+                                {"$ref": "#/definitions/whitelist"}
+                            ],
+                        }
+                    }
+                }
+            }
+        }
+    },
+    "definitions": {
+        "empty": {
+            "type": "object",
+            "required": ['type'],
+            "properties": {
+                "type": {
+                    "type": "string"
+                },
+            },
+            "additionalProperties": False,
+        },
+        "any_one_of": {
+            "type": "object",
+            "additionalProperties": False,
+            "required": ['type', 'any_one_of'],
+            "properties": {
+                "type": {
+                    "type": "string"
+                },
+                "any_one_of": {
+                    "type": "array"
+                },
+                "regex": {
+                    "type": "boolean"
+                }
+            }
+        },
+        "not_any_of": {
+            "type": "object",
+            "additionalProperties": False,
+            "required": ['type', 'not_any_of'],
+            "properties": {
+                "type": {
+                    "type": "string"
+                },
+                "not_any_of": {
+                    "type": "array"
+                },
+                "regex": {
+                    "type": "boolean"
+                }
+            }
+        },
+        "blacklist": {
+            "type": "object",
+            "additionalProperties": False,
+            "required": ['type', 'blacklist'],
+            "properties": {
+                "type": {
+                    "type": "string"
+                },
+                "blacklist": {
+                    "type": "array"
+                }
+            }
+        },
+        "whitelist": {
+            "type": "object",
+            "additionalProperties": False,
+            "required": ['type', 'whitelist'],
+            "properties": {
+                "type": {
+                    "type": "string"
+                },
+                "whitelist": {
+                    "type": "array"
+                }
+            }
+        }
+    }
+}
+
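+# An illustrative ``rules`` document that validates against MAPPING_SCHEMA
+# (the group id is hypothetical); it maps an asserted UserName onto a local
+# user and a fixed group:
+#
+#     {
+#         "rules": [
+#             {
+#                 "local": [{"user": {"name": "{0}"}},
+#                           {"group": {"id": "0cd5e9"}}],
+#                 "remote": [{"type": "UserName"}]
+#             }
+#         ]
+#     }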
+
+class DirectMaps(object):
+    """An abstraction around the remote matches.
+
+    Each match is treated internally as a list.
+    """
+
+    def __init__(self):
+        self._matches = []
+
+    def add(self, values):
+        """Adds a matched value to the list of matches.
+
+        :param list value: the match to save
+
+        """
+        self._matches.append(values)
+
+    def __getitem__(self, idx):
+        """Used by Python when executing ``''.format(*DirectMaps())``."""
+        value = self._matches[idx]
+        if isinstance(value, list) and len(value) == 1:
+            return value[0]
+        else:
+            return value
+
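+# Illustrative behaviour of DirectMaps: single-element matches are unwrapped
+# on access, multi-element matches are returned whole:
+#
+#     maps = DirectMaps()
+#     maps.add(['alice'])
+#     maps.add(['admin', 'member'])
+#     '{0}'.format(*maps)   # -> 'alice'
+#     '{1}'.format(*maps)   # -> "['admin', 'member']"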
+
+def validate_mapping_structure(ref):
+    v = jsonschema.Draft4Validator(MAPPING_SCHEMA)
+
+    messages = ''
+    for error in sorted(v.iter_errors(ref), key=str):
+        messages = messages + error.message + "\n"
+
+    if messages:
+        raise exception.ValidationError(messages)
+
+
+def validate_expiration(token_ref):
+    if timeutils.utcnow() > token_ref.expires:
+        raise exception.Unauthorized(_('Federation token is expired'))
+
+
+def validate_groups_cardinality(group_ids, mapping_id):
+    """Check if groups list is non-empty.
+
+    :param group_ids: list of group ids
+    :type group_ids: list of str
+
+    :param mapping_id: id of the mapping used for this operation
+    :type mapping_id: str
+
+    :raises exception.MissingGroups: if ``group_ids`` cardinality is 0
+
+    """
+    if not group_ids:
+        raise exception.MissingGroups(mapping_id=mapping_id)
+
+
+def validate_idp(idp, assertion):
+    """Check if the IdP providing the assertion is the one registered for
+       the mapping
+    """
+    remote_id_parameter = CONF.federation.remote_id_attribute
+    if not remote_id_parameter or not idp['remote_id']:
+        LOG.warning(_LW('Unable to identify the IdP %s'),
+                    idp['id'])
+        # If nothing is defined, the administrator may want to
+        # allow the mapping of every IdP
+        return
+    try:
+        idp_remote_identifier = assertion[remote_id_parameter]
+    except KeyError:
+        msg = _('Could not find Identity Provider identifier in '
+                'environment, check [federation] remote_id_attribute '
+                'for details.')
+        raise exception.ValidationError(msg)
+    if idp_remote_identifier != idp['remote_id']:
+        msg = _('Incoming identity provider identifier not included '
+                'among the accepted identifiers.')
+        raise exception.Forbidden(msg)
+
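+# For example (illustrative, a common Shibboleth setup): with
+# remote_id_attribute = 'Shib-Identity-Provider' the assertion is expected
+# to carry something like
+#
+#     assertion['Shib-Identity-Provider'] == 'https://myidp.example.com/idp'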
+
+def validate_groups_in_backend(group_ids, mapping_id, identity_api):
+    """Iterate over group ids and make sure they are present in the backend/
+
+    This call is not transactional.
+    :param group_ids: IDs of the groups to be checked
+    :type group_ids: list of str
+
+    :param mapping_id: id of the mapping used for this operation
+    :type mapping_id: str
+
+    :param identity_api: Identity Manager object used for communication with
+                         backend
+    :type identity_api: identity.Manager
+
+    :raises: exception.MappedGroupNotFound
+
+    """
+    for group_id in group_ids:
+        try:
+            identity_api.get_group(group_id)
+        except exception.GroupNotFound:
+            raise exception.MappedGroupNotFound(
+                group_id=group_id, mapping_id=mapping_id)
+
+
+def validate_groups(group_ids, mapping_id, identity_api):
+    """Check group ids cardinality and check their existence in the backend.
+
+    This call is not transactional.
+
+    :param group_ids: IDs of the groups to be checked
+    :type group_ids: list of str
+
+    :param mapping_id: id of the mapping used for this operation
+    :type mapping_id: str
+
+    :param identity_api: Identity Manager object used for communication with
+                         backend
+    :type identity_api: identity.Manager
+
+    :raises: exception.MappedGroupNotFound
+    :raises: exception.MissingGroups
+
+    """
+    validate_groups_cardinality(group_ids, mapping_id)
+    validate_groups_in_backend(group_ids, mapping_id, identity_api)
+
+
+# TODO(marek-denis): Optimize this function, so the number of calls to the
+# backend are minimized.
+def transform_to_group_ids(group_names, mapping_id,
+                           identity_api, assignment_api):
+    """Transform groups identitified by name/domain to their ids
+
+    Function accepts list of groups identified by a name and domain giving
+    a list of group ids in return.
+
+    Example of group_names parameter::
+
+        [
+            {
+                "name": "group_name",
+                "domain": {
+                    "id": "domain_id"
+                },
+            },
+            {
+                "name": "group_name_2",
+                "domain": {
+                    "name": "domain_name"
+                }
+            }
+        ]
+
+    :param group_names: list of groups identified by name and domain.
+    :type group_names: list
+
+    :param mapping_id: id of the mapping used for mapping assertion into
+        local credentials
+    :type mapping_id: str
+
+    :param identity_api: identity_api object
+    :param assignment_api: assignment_api object
+
+    :returns: generator object with group ids
+
+    :raises exception.MappedGroupNotFound: in case a requested group doesn't
+        exist in the backend.
+
+    """
+
+    def resolve_domain(domain):
+        """Return domain id.
+
+        Input is a dictionary with a domain identified either by an ``id`` or
+        a ``name``. In the latter case the system will attempt to fetch the
+        domain object from the backend.
+
+        :returns: domain's id
+        :rtype: str
+
+        """
+        domain_id = (domain.get('id') or
+                     assignment_api.get_domain_by_name(
+                     domain.get('name')).get('id'))
+        return domain_id
+
+    for group in group_names:
+        try:
+            group_dict = identity_api.get_group_by_name(
+                group['name'], resolve_domain(group['domain']))
+            yield group_dict['id']
+        except exception.GroupNotFound:
+            LOG.debug('Skip mapping group %s; has no entry in the backend',
+                      group['name'])
+
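+# Illustrative usage (the *_api arguments are the usual keystone managers):
+#
+#     names = [{'name': 'admins', 'domain': {'id': 'default'}}]
+#     group_ids = list(transform_to_group_ids(names, mapping_id,
+#                                             identity_api, assignment_api))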
+
+def get_assertion_params_from_env(context):
+    LOG.debug('Environment variables: %s', context['environment'])
+    prefix = CONF.federation.assertion_prefix
+    for k, v in context['environment'].items():
+        if k.startswith(prefix):
+            yield (k, v)
+
+
+class UserType(object):
+    """User mapping type."""
+    EPHEMERAL = 'ephemeral'
+    LOCAL = 'local'
+
+
+class RuleProcessor(object):
+    """A class to process assertions and mapping rules."""
+
+    class _EvalType(object):
+        """Mapping rule evaluation types."""
+        ANY_ONE_OF = 'any_one_of'
+        NOT_ANY_OF = 'not_any_of'
+        BLACKLIST = 'blacklist'
+        WHITELIST = 'whitelist'
+
+    def __init__(self, rules):
+        """Initialize RuleProcessor.
+
+        Example rules can be found at:
+        :class:`keystone.tests.mapping_fixtures`
+
+        :param rules: rules from a mapping
+        :type rules: dict
+
+        """
+
+        self.rules = rules
+
+    def process(self, assertion_data):
+        """Transform assertion to a dictionary of user name and group ids
+        based on mapping rules.
+
+        This function will iterate through the mapping rules to find
+        assertions that are valid.
+
+        :param assertion_data: an assertion containing values from an IdP
+        :type assertion_data: dict
+
+        Example assertion_data::
+
+            {
+                'Email': 'testacct@example.com',
+                'UserName': 'testacct',
+                'FirstName': 'Test',
+                'LastName': 'Account',
+                'orgPersonType': 'Tester'
+            }
+
+        :returns: dictionary with user and group_ids
+
+        The expected return structure is::
+
+            {
+                'name': 'foobar',
+                'group_ids': ['abc123', 'def456'],
+                'group_names': [
+                    {
+                        'name': 'group_name_1',
+                        'domain': {
+                            'name': 'domain1'
+                        }
+                    },
+                    {
+                        'name': 'group_name_1_1',
+                        'domain': {
+                            'name': 'domain1'
+                        }
+                    },
+                    {
+                        'name': 'group_name_2',
+                        'domain': {
+                            'id': 'xyz132'
+                        }
+                    }
+                ]
+            }
+
+        """
+
+        # Assertions will come in as string key-value pairs, and will use a
+        # semi-colon to indicate multiple values, i.e. groups.
+        # This will create a new dictionary where the values are arrays, and
+        # any multiple values are stored in the arrays.
+        LOG.debug('assertion data: %s', assertion_data)
+        assertion = {n: v.split(';') for n, v in assertion_data.items()
+                     if isinstance(v, six.string_types)}
+        LOG.debug('assertion: %s', assertion)
+        identity_values = []
+
+        LOG.debug('rules: %s', self.rules)
+        for rule in self.rules:
+            direct_maps = self._verify_all_requirements(rule['remote'],
+                                                        assertion)
+
+            # If the compare comes back as None, then the rule did not apply
+            # to the assertion data, go on to the next rule
+            if direct_maps is None:
+                continue
+
+            # If there are no direct mappings, then add the local mapping
+            # directly to the array of saved values. However, if there is
+            # a direct mapping, then perform variable replacement.
+            if not direct_maps:
+                identity_values += rule['local']
+            else:
+                for local in rule['local']:
+                    new_local = self._update_local_mapping(local, direct_maps)
+                    identity_values.append(new_local)
+
+        LOG.debug('identity_values: %s', identity_values)
+        mapped_properties = self._transform(identity_values)
+        LOG.debug('mapped_properties: %s', mapped_properties)
+        return mapped_properties
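+
+    # An illustrative end-to-end run (rule and assertion are hypothetical):
+    #
+    #     rules = [{'local': [{'user': {'name': '{0}'}}],
+    #               'remote': [{'type': 'UserName'}]}]
+    #     RuleProcessor(rules).process({'UserName': 'testacct'})
+    #
+    # would return a mapped ephemeral user named 'testacct' with empty
+    # group_ids and group_names.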
+
+    def _transform(self, identity_values):
+        """Transform local mappings, to an easier to understand format.
+
+        Transform the incoming array to generate the return value for
+        the process function. Generating content for Keystone tokens will
+        be easier if some pre-processing is done at this level.
+
+        :param identity_values: local mapping from valid evaluations
+        :type identity_values: array of dict
+
+        Example identity_values::
+
+            [
+                {
+                    'group': {'id': '0cd5e9'},
+                    'user': {
+                        'email': 'bob@example.com'
+                    },
+                },
+                {
+                    'groups': ['member', 'admin', 'tester'],
+                    'domain': {
+                        'name': 'default_domain'
+                    }
+                }
+            ]
+
+        :returns: dictionary with user name, group_ids and group_names.
+        :rtype: dict
+
+        """
+
+        def extract_groups(groups_by_domain):
+            for groups in groups_by_domain.values():
+                for group in {g['name']: g for g in groups}.values():
+                    yield group
+
+        def normalize_user(user):
+            """Parse and validate user mapping."""
+
+            user_type = user.get('type')
+
+            if user_type and user_type not in (UserType.EPHEMERAL,
+                                               UserType.LOCAL):
+                msg = _("User type %s not supported") % user_type
+                raise exception.ValidationError(msg)
+
+            if user_type is None:
+                user_type = user['type'] = UserType.EPHEMERAL
+
+            if user_type == UserType.EPHEMERAL:
+                user['domain'] = {
+                    'id': (CONF.federation.federated_domain_name or
+                           federation.FEDERATED_DOMAIN_KEYWORD)
+                }
+
+        # initialize the group_ids as a set to eliminate duplicates
+        user = {}
+        group_ids = set()
+        group_names = list()
+        groups_by_domain = dict()
+
+        for identity_value in identity_values:
+            if 'user' in identity_value:
+                # if a mapping outputs more than one user name, log it
+                if user:
+                    LOG.warning(_LW('Ignoring user name'))
+                else:
+                    user = identity_value.get('user')
+            if 'group' in identity_value:
+                group = identity_value['group']
+                if 'id' in group:
+                    group_ids.add(group['id'])
+                elif 'name' in group:
+                    domain = (group['domain'].get('name') or
+                              group['domain'].get('id'))
+                    groups_by_domain.setdefault(domain, list()).append(group)
+                group_names.extend(extract_groups(groups_by_domain))
+            if 'groups' in identity_value:
+                if 'domain' not in identity_value:
+                    msg = _("Invalid rule: %(identity_value)s. Both 'groups' "
+                            "and 'domain' keywords must be specified.")
+                    msg = msg % {'identity_value': identity_value}
+                    raise exception.ValidationError(msg)
+                # In this case, identity_value['groups'] is a string
+                # representation of a list, and we want a real list. This is
+                # due to the way we do direct mapping substitutions today
+                # (see _update_local_mapping()).
+                try:
+                    group_names_list = ast.literal_eval(
+                        identity_value['groups'])
+                except ValueError:
+                    group_names_list = [identity_value['groups']]
+                domain = identity_value['domain']
+                group_dicts = [{'name': name, 'domain': domain} for name in
+                               group_names_list]
+
+                group_names.extend(group_dicts)
+
+        normalize_user(user)
+
+        return {'user': user,
+                'group_ids': list(group_ids),
+                'group_names': group_names}
+
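+    # Sketch of the expected output shape (values hypothetical): for the
+    # docstring input above, _transform() would return roughly
+    #
+    #     {'user': {'email': 'bob@example.com',
+    #               'type': UserType.EPHEMERAL,
+    #               'domain': {'id': ...}},
+    #      'group_ids': ['0cd5e9'],
+    #      'group_names': [{'name': 'member', 'domain': ...}, ...]}
+    #
+    # normalize_user() defaults the user type to ephemeral and pins such
+    # users to the federated domain; group_ids is a set, so duplicates
+    # collapse.
+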
+    def _update_local_mapping(self, local, direct_maps):
+        """Replace any {0}, {1} ... values with data from the assertion.
+
+        :param local: local mapping reference that needs to be updated
+        :type local: dict
+        :param direct_maps: identity values used to update local
+        :type direct_maps: keystone.contrib.federation.utils.DirectMaps
+
+        Example local::
+
+            {'user': {'name': '{0} {1}', 'email': '{2}'}}
+
+        Example direct_maps::
+
+            ['Bob', 'Thompson', 'bob@example.com']
+
+        :returns: new local mapping reference with replaced values.
+
+        The expected return structure is::
+
+            {'user': {'name': 'Bob Thompson', 'email': 'bob@example.com'}}
+
+        """
+
+        LOG.debug('direct_maps: %s', direct_maps)
+        LOG.debug('local: %s', local)
+        new = {}
+        for k, v in six.iteritems(local):
+            if isinstance(v, dict):
+                new_value = self._update_local_mapping(v, direct_maps)
+            else:
+                new_value = v.format(*direct_maps)
+            new[k] = new_value
+        return new
+
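+    # Worked example (values hypothetical): with
+    #
+    #     local = {'user': {'name': '{0} {1}', 'email': '{2}'}}
+    #
+    # and direct_maps holding ['Bob', 'Thompson', 'bob@example.com'], the
+    # method recurses into nested dicts and applies str.format(), yielding
+    # {'user': {'name': 'Bob Thompson', 'email': 'bob@example.com'}}.
+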
+    def _verify_all_requirements(self, requirements, assertion):
+        """Go through the remote requirements of a rule, and compare against
+        the assertion.
+
+        If a value of ``None`` is returned, the rule with this assertion
+        doesn't apply.
+        If an array of zero length is returned, then there are no direct
+        mappings to be performed, but the rule is valid.
+        Otherwise, then it will first attempt to filter the values according
+        to blacklist or whitelist rules and finally return the values in
+        order, to be directly mapped.
+
+        :param requirements: list of remote requirements from rules
+        :type requirements: list
+
+        Example requirements::
+
+            [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        "Customer"
+                    ]
+                },
+                {
+                    "type": "ADFS_GROUPS",
+                    "whitelist": [
+                        "g1", "g2", "g3", "g4"
+                    ]
+                }
+            ]
+
+        :param assertion: dict of attributes from an IdP
+        :type assertion: dict
+
+        Example assertion::
+
+            {
+                'UserName': ['testacct'],
+                'LastName': ['Account'],
+                'orgPersonType': ['Tester'],
+                'Email': ['testacct@example.com'],
+                'FirstName': ['Test'],
+                'ADFS_GROUPS': ['g1', 'g2']
+            }
+
+        :returns: identity values used to update local
+        :rtype: keystone.contrib.federation.utils.DirectMaps
+
+        """
+
+        direct_maps = DirectMaps()
+
+        for requirement in requirements:
+            requirement_type = requirement['type']
+            regex = requirement.get('regex', False)
+
+            any_one_values = requirement.get(self._EvalType.ANY_ONE_OF)
+            if any_one_values is not None:
+                if self._evaluate_requirement(any_one_values,
+                                              requirement_type,
+                                              self._EvalType.ANY_ONE_OF,
+                                              regex,
+                                              assertion):
+                    continue
+                else:
+                    return None
+
+            not_any_values = requirement.get(self._EvalType.NOT_ANY_OF)
+            if not_any_values is not None:
+                if self._evaluate_requirement(not_any_values,
+                                              requirement_type,
+                                              self._EvalType.NOT_ANY_OF,
+                                              regex,
+                                              assertion):
+                    continue
+                else:
+                    return None
+
+            # If 'any_one_of' or 'not_any_of' are not found, then values are
+            # within 'type'. Attempt to find that 'type' within the assertion,
+            # and filter these values if 'whitelist' or 'blacklist' is set.
+            direct_map_values = assertion.get(requirement_type)
+            if direct_map_values:
+                blacklisted_values = requirement.get(self._EvalType.BLACKLIST)
+                whitelisted_values = requirement.get(self._EvalType.WHITELIST)
+
+                # If a blacklist or whitelist is used, we want to map to the
+                # whole list instead of just its values separately.
+                if blacklisted_values:
+                    direct_map_values = [v for v in direct_map_values
+                                         if v not in blacklisted_values]
+                elif whitelisted_values:
+                    direct_map_values = [v for v in direct_map_values
+                                         if v in whitelisted_values]
+
+                direct_maps.add(direct_map_values)
+
+                LOG.debug('updating a direct mapping: %s', direct_map_values)
+
+        return direct_maps
+
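+    # Tracing the docstring example above: 'UserName' carries no evaluation
+    # or filter keywords, so its assertion values ['testacct'] are captured
+    # as direct map {0}; 'orgPersonType' requires any_one_of ['Customer']
+    # but the assertion holds ['Tester'], so the whole rule is rejected and
+    # None is returned before 'ADFS_GROUPS' is considered. Had it matched,
+    # the whitelist would have mapped the filtered list ['g1', 'g2'] as one
+    # direct map value.
+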
+    def _evaluate_values_by_regex(self, values, assertion_values):
+        for value in values:
+            for assertion_value in assertion_values:
+                if re.search(value, assertion_value):
+                    return True
+        return False
+
+    def _evaluate_requirement(self, values, requirement_type,
+                              eval_type, regex, assertion):
+        """Evaluate the incoming requirement and assertion.
+
+        If the requirement type does not exist in the assertion data, then
+        return False. If regex is specified, then compare the values and
+        assertion values. Otherwise, grab the intersection of the values
+        and use that to compare against the evaluation type.
+
+        :param values: list of allowed values, defined in the requirement
+        :type values: list
+        :param requirement_type: key to look for in the assertion
+        :type requirement_type: string
+        :param eval_type: determine how to evaluate requirements
+        :type eval_type: string
+        :param regex: perform evaluation with regex
+        :type regex: boolean
+        :param assertion: dict of attributes from the IdP
+        :type assertion: dict
+
+        :returns: boolean, whether requirement is valid or not.
+
+        """
+
+        assertion_values = assertion.get(requirement_type)
+        if not assertion_values:
+            return False
+
+        if regex:
+            any_match = self._evaluate_values_by_regex(values,
+                                                       assertion_values)
+        else:
+            any_match = bool(set(values).intersection(set(assertion_values)))
+        if any_match and eval_type == self._EvalType.ANY_ONE_OF:
+            return True
+        if not any_match and eval_type == self._EvalType.NOT_ANY_OF:
+            return True
+
+        return False
+
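+    # Truth-table sketch for _evaluate_requirement() (values hypothetical):
+    #
+    #     values=['Customer'], assertion_values=['Customer', 'Admin']
+    #         -> any_match True:  ANY_ONE_OF passes, NOT_ANY_OF fails
+    #     values=['Customer'], assertion_values=['Tester']
+    #         -> any_match False: ANY_ONE_OF fails,  NOT_ANY_OF passes
+    #
+    # With regex=True each value is treated as a pattern and matched with
+    # re.search() instead of the set intersection.
+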
+
+def assert_enabled_identity_provider(federation_api, idp_id):
+    identity_provider = federation_api.get_idp(idp_id)
+    if identity_provider.get('enabled') is not True:
+        msg = _('Identity Provider %(idp)s is disabled') % {'idp': idp_id}
+        LOG.debug(msg)
+        raise exception.Forbidden(msg)
+
+
+def assert_enabled_service_provider_object(service_provider):
+    if service_provider.get('enabled') is not True:
+        sp_id = service_provider['id']
+        msg = _('Service Provider %(sp)s is disabled') % {'sp': sp_id}
+        LOG.debug(msg)
+        raise exception.Forbidden(msg)
diff --git a/keystone-moon/keystone/contrib/moon/__init__.py b/keystone-moon/keystone/contrib/moon/__init__.py
new file mode 100644 (file)
index 0000000..6a96782
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+from keystone.contrib.moon.core import *  # noqa
+from keystone.contrib.moon import controllers  # noqa
+from keystone.contrib.moon import routers  # noqa
\ No newline at end of file
diff --git a/keystone-moon/keystone/contrib/moon/backends/__init__.py b/keystone-moon/keystone/contrib/moon/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/moon/backends/flat.py b/keystone-moon/keystone/contrib/moon/backends/flat.py
new file mode 100644 (file)
index 0000000..6d18d3e
--- /dev/null
@@ -0,0 +1,123 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+from uuid import uuid4
+import os
+import logging
+import re
+import time
+from keystone import config
+from oslo_log import log
+# from keystone.contrib.moon.core import SuperExtensionDriver
+from keystone.contrib.moon.core import LogDriver
+
+
+CONF = config.CONF
+
+
+class LogConnector(LogDriver):
+
+    AUTHZ_FILE = '/var/log/moon/authz.log'
+    TIME_FORMAT = '%Y-%m-%d-%H:%M:%S'
+
+    def __init__(self):
+        # FIXME(dthom): when logging from another class, the %appname% in the event
+        # is always keystone.contrib.moon.backends.flat
+        super(LogConnector, self).__init__()
+        # Configure Log to add new files in /var/log/moon/authz.log and /var/log/moon/system.log
+        self.LOG = log.getLogger(__name__)
+        self.AUTHZ_LOG = logging.getLogger("authz")
+        self.AUTHZ_LOG.setLevel(logging.WARNING)
+        fh = logging.FileHandler(self.AUTHZ_FILE)
+        fh.setLevel(logging.WARNING)
+        formatter = logging.Formatter('%(asctime)s ------ %(message)s', self.TIME_FORMAT)
+        fh.setFormatter(formatter)
+        self.AUTHZ_LOG.addHandler(fh)
+
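+    # Authorization records are emitted at WARNING so they clear both the
+    # logger and handler thresholds configured above; the helpers below
+    # delegate ordinary messages to the regular keystone logger.
+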
+    def authz(self, message):
+        self.AUTHZ_LOG.warning(message)
+
+    def debug(self, message):
+        self.LOG.debug(message)
+
+    def info(self, message):
+        self.LOG.info(message)
+
+    def warning(self, message):
+        self.LOG.warning(message)
+
+    def error(self, message):
+        self.LOG.error(message)
+
+    def critical(self, message):
+        self.LOG.critical(message)
+
+    def get_logs(self, options):
+        options = options.split(",")
+        self.info("Options of logs check : {}".format(options))
+        event_number = None
+        time_from = None
+        time_to = None
+        filter_str = None
+        for opt in options:
+            if "event_number" in opt:
+                event_number = "".join(re.findall("\d*", opt.split("=")[-1]))
+                try:
+                    event_number = int(event_number)
+                except ValueError:
+                    event_number = None
+            elif "from" in opt:
+                time_from = "".join(re.findall("[\w\-:]*", opt.split("=")[-1]))
+                try:
+                    time_from = time.strptime(time_from, self.TIME_FORMAT)
+                except ValueError:
+                    time_from = None
+            elif "to" in opt:
+                time_to = "".join(re.findall("[\w\-:] *", opt.split("=")[-1]))
+                try:
+                    time_to = time.strptime(time_to, self.TIME_FORMAT)
+                except ValueError:
+                    time_to = None
+            elif "filter" in opt:
+                filter_str = "".join(re.findall("\w*", opt.split("=")[-1]))
+        _logs = open(self.AUTHZ_FILE).readlines()
+        if filter_str:
+            _logs = filter(lambda x: filter_str in x, _logs)
+        self.info("Options of logs check : {} {} {} {}".format(event_number, time_from, time_to, filter_str))
+        if time_from:
+            try:
+                # keep entries whose leading timestamp is at or after time_from
+                _logs = filter(lambda x: time_from <= time.strptime(x.split(" ")[0], self.TIME_FORMAT), _logs)
+            except ValueError:
+                self.error("Time format error")
+        if time_to:
+            try:
+                # keep entries whose leading timestamp is at or before time_to
+                _logs = filter(lambda x: time_to >= time.strptime(x.split(" ")[0], self.TIME_FORMAT), _logs)
+            except ValueError:
+                self.error("Time format error")
+        if event_number:
+            _logs = _logs[-event_number:]
+        return list(_logs)
+
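+    # Usage sketch (option string format inferred from the parsing above;
+    # values hypothetical):
+    #
+    #     logs = connector.get_logs(
+    #         "event_number=10,from=2015-06-30-00:00:00,filter=authz")
+    #
+    # returns at most the last 10 authz.log lines containing "authz" whose
+    # leading timestamp is at or after the given time.
+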
+
+# class SuperExtensionConnector(SuperExtensionDriver):
+#
+#     def __init__(self):
+#         super(SuperExtensionConnector, self).__init__()
+#         # Super_Extension is loaded every time the server is started
+#         self.__uuid = uuid4().hex
+#         # self.__super_extension = Extension()
+#         _policy_abs_dir = os.path.join(CONF.moon.super_extension_directory, 'policy')
+#         # self.__super_extension.load_from_json(_policy_abs_dir)
+#
+#     def get_super_extensions(self):
+#         return None
+#
+#     def admin(self, sub, obj, act):
+#         # return self.__super_extension.authz(sub, obj, act)
+#         return True
\ No newline at end of file
diff --git a/keystone-moon/keystone/contrib/moon/backends/sql.py b/keystone-moon/keystone/contrib/moon/backends/sql.py
new file mode 100644 (file)
index 0000000..5f76e23
--- /dev/null
@@ -0,0 +1,1537 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import six
+from uuid import uuid4
+import copy
+
+from keystone import config
+from oslo_log import log
+from keystone.common import sql
+from keystone import exception
+from keystone.contrib.moon.exception import *
+from oslo_serialization import jsonutils
+from keystone.contrib.moon import IntraExtensionDriver
+from keystone.contrib.moon import TenantDriver
+# from keystone.contrib.moon import InterExtensionDriver
+
+from keystone.contrib.moon.exception import TenantError, TenantListEmptyError
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+class IntraExtension(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'intra_extension'
+    attributes = ['id', 'name', 'model', 'description']
+    id = sql.Column(sql.String(64), primary_key=True)
+    name = sql.Column(sql.String(64), nullable=False)
+    model = sql.Column(sql.String(64), nullable=True)
+    description = sql.Column(sql.Text())
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class Subject(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'subject'
+    attributes = ['id', 'subjects', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    subjects = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
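+# Each per-extension table below follows the same shape (sketch; uuids
+# hypothetical): a JsonBlob column holding a {uuid: name} mapping plus a
+# foreign key to its intra_extension row, e.g.
+#
+#     Subject.from_dict({'id': uuid4().hex,
+#                        'subjects': {user_uuid: 'admin'},
+#                        'intra_extension_uuid': ie_uuid})
+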
+class Object(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'object'
+    attributes = ['id', 'objects', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    objects = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class Action(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'action'
+    attributes = ['id', 'actions', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    actions = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class SubjectCategory(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'subject_category'
+    attributes = ['id', 'subject_categories', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    subject_categories = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class ObjectCategory(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'object_category'
+    attributes = ['id', 'object_categories', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    object_categories = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class ActionCategory(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'action_category'
+    attributes = ['id', 'action_categories', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    action_categories = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class SubjectCategoryScope(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'subject_category_scope'
+    attributes = ['id', 'subject_category_scope', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    subject_category_scope = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class ObjectCategoryScope(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'object_category_scope'
+    attributes = ['id', 'object_category_scope', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    object_category_scope = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class ActionCategoryScope(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'action_category_scope'
+    attributes = ['id', 'action_category_scope', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    action_category_scope = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class SubjectCategoryAssignment(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'subject_category_assignment'
+    attributes = ['id', 'subject_category_assignments', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    subject_category_assignments = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class ObjectCategoryAssignment(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'object_category_assignment'
+    attributes = ['id', 'object_category_assignments', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    object_category_assignments = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class ActionCategoryAssignment(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'action_category_assignment'
+    attributes = ['id', 'action_category_assignments', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    action_category_assignments = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class MetaRule(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'metarule'
+    attributes = ['id', 'sub_meta_rules', 'aggregation', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    sub_meta_rules = sql.Column(sql.JsonBlob(), nullable=True)
+    aggregation = sql.Column(sql.Text(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class Rule(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'rule'
+    attributes = ['id', 'rules', 'intra_extension_uuid']
+    id = sql.Column(sql.String(64), primary_key=True)
+    rules = sql.Column(sql.JsonBlob(), nullable=True)
+    intra_extension_uuid = sql.Column(sql.ForeignKey("intra_extension.id"), nullable=False)
+
+    @classmethod
+    def from_dict(cls, d):
+        new_d = d.copy()
+        return cls(**new_d)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class Tenant(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'tenants'
+    attributes = [
+        'id', 'name', 'authz', 'admin'
+    ]
+    id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+    name = sql.Column(sql.String(128), nullable=True)
+    authz = sql.Column(sql.String(64), nullable=True)
+    admin = sql.Column(sql.String(64), nullable=True)
+
+    @classmethod
+    def from_dict(cls, d):
+        """Override parent from_dict() method with a different implementation.
+        """
+        new_d = d.copy()
+        uuid = list(new_d.keys())[0]
+        return cls(id=uuid, **new_d[uuid])
+
+    def to_dict(self):
+        """
+        """
+        tenant_dict = {}
+        for key in ("id", "name", "authz", "admin"):
+            tenant_dict[key] = getattr(self, key)
+        return tenant_dict
+
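+# Note the asymmetric Tenant serialization (sketch; values hypothetical):
+# from_dict() expects the tenant id as the outer key, while to_dict()
+# flattens it back into the row:
+#
+#     t = Tenant.from_dict({'abc123': {'name': 'demo',
+#                                      'authz': 'authz_ie_uuid',
+#                                      'admin': 'admin_ie_uuid'}})
+#     t.to_dict()  # {'id': 'abc123', 'name': 'demo', ...}
+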
+__all_objects__ = (
+    Subject,
+    Object,
+    Action,
+    SubjectCategory,
+    ObjectCategory,
+    ActionCategory,
+    SubjectCategoryScope,
+    ObjectCategoryScope,
+    ActionCategoryScope,
+    SubjectCategoryAssignment,
+    ObjectCategoryAssignment,
+    ActionCategoryAssignment,
+    MetaRule,
+    Rule,
+)
+
+
+class IntraExtensionConnector(IntraExtensionDriver):
+
+    def get_intra_extension_list(self):
+        with sql.transaction() as session:
+            query = session.query(IntraExtension.id)
+            intraextensions = query.all()
+            # return intraextensions
+            return [intraextension[0] for intraextension in intraextensions]
+
+    def set_intra_extension(self, intra_id, intra_extension):
+        with sql.transaction() as session:
+            # intra_extension["admin"] = jsonutils.dumps(intra_extension["admin"])
+            # intra_extension["authz"] = jsonutils.dumps(intra_extension["authz"])
+            ie_ref = IntraExtension.from_dict(intra_extension)
+            session.add(ie_ref)
+            return IntraExtension.to_dict(ie_ref)
+
+    def get_intra_extension(self, uuid):
+        with sql.transaction() as session:
+            query = session.query(IntraExtension)
+            query = query.filter_by(id=uuid)
+            ref = query.first()
+            if not ref:
+                raise exception.NotFound
+            return ref.to_dict()
+
+    def delete_intra_extension(self, intra_extension_id):
+        with sql.transaction() as session:
+            ref = session.query(IntraExtension).get(intra_extension_id)
+            # Must delete all references to that IntraExtension
+            for _object in __all_objects__:
+                query = session.query(_object)
+                query = query.filter_by(intra_extension_uuid=intra_extension_id)
+                _ref = query.first()
+                if _ref:
+                    session.delete(_ref)
+            session.flush()
+            session.delete(ref)
+
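+    # delete_intra_extension() cascades by hand: each table listed in
+    # __all_objects__ is scanned for the row bound to this extension and
+    # that row is deleted before the IntraExtension row itself, since no
+    # ON DELETE CASCADE is declared on the foreign keys.
+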
+    # Getter and setter for name
+
+    def get_name(self, uuid):
+        intra_extension = self.get_intra_extension(uuid)
+        return intra_extension["name"]
+
+    def set_name(self, uuid, name):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and setter for model
+
+    def get_model(self, uuid):
+        intra_extension = self.get_intra_extension(uuid)
+        return intra_extension["model"]
+
+    def set_model(self, uuid, model):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and setter for description
+
+    def get_description(self, uuid):
+        intra_extension = self.get_intra_extension(uuid)
+        return intra_extension["description"]
+
+    def set_description(self, uuid, args):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_subject_dict(self, extension_uuid):
+        with sql.transaction() as session:
+            query = session.query(Subject)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            return ref.to_dict()
+
+    def set_subject_dict(self, extension_uuid, subject_uuid):
+        with sql.transaction() as session:
+            query = session.query(Subject)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            new_ref = Subject.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'subjects': subject_uuid,
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                for attr in Subject.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_subject(self, extension_uuid, subject_uuid, subject_name):
+        with sql.transaction() as session:
+            query = session.query(Subject)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            subjects = dict(old_ref["subjects"])
+            subjects[subject_uuid] = subject_name
+            new_ref = Subject.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'subjects': subjects,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in Subject.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return {"subject": {"uuid": subject_uuid, "name": subject_name}}
+
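+    # The same get/set/add/remove pattern repeats below for objects, actions
+    # and their categories. Driver-level sketch (uuids hypothetical):
+    #
+    #     driver.set_subject_dict(ie_uuid, {})            # create the row
+    #     driver.add_subject(ie_uuid, user_uuid, 'admin') # grow the mapping
+    #     driver.get_subject_dict(ie_uuid)['subjects']    # {user_uuid: 'admin'}
+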
+    def remove_subject(self, extension_uuid, subject_uuid):
+        with sql.transaction() as session:
+            query = session.query(Subject)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            else:
+                old_ref = ref.to_dict()
+                subjects = dict(old_ref["subjects"])
+                try:
+                    subjects.pop(subject_uuid)
+                except KeyError:
+                    LOG.error("KeyError in remove_subject {} | {}".format(subject_uuid, subjects))
+                else:
+                    new_ref = Subject.from_dict(
+                        {
+                            "id": old_ref["id"],
+                            'subjects': subjects,
+                            'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                        }
+                    )
+                    for attr in Subject.attributes:
+                        if attr != 'id':
+                            setattr(ref, attr, getattr(new_ref, attr))
+
+    def get_object_dict(self, extension_uuid):
+        with sql.transaction() as session:
+            query = session.query(Object)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            return ref.to_dict()
+
+    def set_object_dict(self, extension_uuid, object_uuid):
+        with sql.transaction() as session:
+            query = session.query(Object)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            new_ref = Object.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'objects': object_uuid,
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                for attr in Object.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_object(self, extension_uuid, object_uuid, object_name):
+        with sql.transaction() as session:
+            query = session.query(Object)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            objects = dict(old_ref["objects"])
+            objects[object_uuid] = object_name
+            new_ref = Object.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'objects': objects,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in Object.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return {"object": {"uuid": object_uuid, "name": object_name}}
+
+    def remove_object(self, extension_uuid, object_uuid):
+        with sql.transaction() as session:
+            query = session.query(Object)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            else:
+                old_ref = ref.to_dict()
+                objects = dict(old_ref["objects"])
+                try:
+                    objects.pop(object_uuid)
+                except KeyError:
+                    LOG.error("KeyError in remove_object {} | {}".format(object_uuid, objects))
+                else:
+                    new_ref = Object.from_dict(
+                        {
+                            "id": old_ref["id"],
+                            'objects': objects,
+                            'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                        }
+                    )
+                    for attr in Object.attributes:
+                        if attr != 'id':
+                            setattr(ref, attr, getattr(new_ref, attr))
+
+    def get_action_dict(self, extension_uuid):
+        with sql.transaction() as session:
+            query = session.query(Action)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            return ref.to_dict()
+
+    def set_action_dict(self, extension_uuid, action_uuid):
+        with sql.transaction() as session:
+            query = session.query(Action)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            new_ref = Action.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'actions': action_uuid,
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                for attr in Action.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_action(self, extension_uuid, action_uuid, action_name):
+        with sql.transaction() as session:
+            query = session.query(Action)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            actions = dict(old_ref["actions"])
+            actions[action_uuid] = action_name
+            new_ref = Action.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'actions': actions,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in Action.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return {"action": {"uuid": action_uuid, "name": action_name}}
+
+    def remove_action(self, extension_uuid, action_uuid):
+        with sql.transaction() as session:
+            query = session.query(Action)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            else:
+                old_ref = ref.to_dict()
+                actions = dict(old_ref["actions"])
+                try:
+                    actions.pop(action_uuid)
+                except KeyError:
+                    LOG.error("KeyError in remove_action {} | {}".format(action_uuid, actions))
+                else:
+                    new_ref = Action.from_dict(
+                        {
+                            "id": old_ref["id"],
+                            'actions': actions,
+                            'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                        }
+                    )
+                    for attr in Action.attributes:
+                        if attr != 'id':
+                            setattr(ref, attr, getattr(new_ref, attr))
+
+    # Getter and Setter for subject_category
+
+    def get_subject_category_dict(self, extension_uuid):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            return ref.to_dict()
+
+    def set_subject_category_dict(self, extension_uuid, subject_categories):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            new_ref = SubjectCategory.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'subject_categories': subject_categories,
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                for attr in SubjectCategory.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_subject_category_dict(self, extension_uuid, subject_category_uuid, subject_category_name):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            subject_categories = dict(old_ref["subject_categories"])
+            subject_categories[subject_category_uuid] = subject_category_name
+            new_ref = SubjectCategory.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'subject_categories': subject_categories,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in SubjectCategory.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return {"subject_category": {"uuid": subject_category_uuid, "name": subject_category_name}}
+
+    def remove_subject_category(self, extension_uuid, subject_category_uuid):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            else:
+                old_ref = ref.to_dict()
+                subject_categories = dict(old_ref["subject_categories"])
+                try:
+                    subject_categories.pop(subject_category_uuid)
+                except KeyError:
+                    pass
+                else:
+                    new_ref = SubjectCategory.from_dict(
+                        {
+                            "id": old_ref["id"],
+                            'subject_categories': subject_categories,
+                            'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                        }
+                    )
+                    for attr in SubjectCategory.attributes:
+                        if attr != 'id':
+                            setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    # Getter and Setter for object_category
+
+    def get_object_category_dict(self, extension_uuid):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            return ref.to_dict()
+
+    def set_object_category_dict(self, extension_uuid, object_categories):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            new_ref = ObjectCategory.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'object_categories': object_categories,
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                for attr in ObjectCategory.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_object_category_dict(self, extension_uuid, object_category_uuid, object_category_name):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            object_categories = dict(old_ref["object_categories"])
+            object_categories[object_category_uuid] = object_category_name
+            new_ref = ObjectCategory.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'object_categories': object_categories,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in ObjectCategory.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return {"object_category": {"uuid": object_category_uuid, "name": object_category_name}}
+
+    def remove_object_category(self, extension_uuid, object_category_uuid):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            else:
+                old_ref = ref.to_dict()
+                object_categories = dict(old_ref["object_categories"])
+                try:
+                    object_categories.pop(object_category_uuid)
+                except KeyError:
+                    pass
+                else:
+                    new_ref = ObjectCategory.from_dict(
+                        {
+                            "id": old_ref["id"],
+                            'object_categories': object_categories,
+                            'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                        }
+                    )
+                    for attr in ObjectCategory.attributes:
+                        if attr != 'id':
+                            setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    # Getter and Setter for action_category
+
+    def get_action_category_dict(self, extension_uuid):
+        with sql.transaction() as session:
+            query = session.query(ActionCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            return ref.to_dict()
+
+    def set_action_category_dict(self, extension_uuid, action_categories):
+        with sql.transaction() as session:
+            query = session.query(ActionCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            new_ref = ActionCategory.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'action_categories': action_categories,
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                for attr in ActionCategory.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_action_category_dict(self, extension_uuid, action_category_uuid, action_category_name):
+        with sql.transaction() as session:
+            query = session.query(ActionCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            action_categories = dict(old_ref["action_categories"])
+            action_categories[action_category_uuid] = action_category_name
+            new_ref = ActionCategory.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'action_categories': action_categories,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in ActionCategory.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return {"action_category": {"uuid": action_category_uuid, "name": action_category_name}}
+
+    def remove_action_category(self, extension_uuid, action_category_uuid):
+        with sql.transaction() as session:
+            query = session.query(ActionCategory)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            else:
+                old_ref = ref.to_dict()
+                action_categories = dict(old_ref["action_categories"])
+                try:
+                    action_categories.pop(action_category_uuid)
+                except KeyError:
+                    pass
+                else:
+                    new_ref = ActionCategory.from_dict(
+                        {
+                            "id": old_ref["id"],
+                            'action_categories': action_categories,
+                            'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                        }
+                    )
+                    for attr in ActionCategory.attributes:
+                        if attr != 'id':
+                            setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    # Getter and Setter for subject_category_value_scope
+
+    def get_subject_category_scope_dict(self, extension_uuid, subject_category):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            result = copy.deepcopy(ref.to_dict())
+            if subject_category not in result["subject_category_scope"].keys():
+                raise CategoryNotFound()
+            result["subject_category_scope"] = {subject_category: result["subject_category_scope"][subject_category]}
+            return result
+
+    def set_subject_category_scope_dict(self, extension_uuid, subject_category, scope):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                new_ref = SubjectCategoryScope.from_dict(
+                    {
+                        "id": uuid4().hex,
+                        'subject_category_scope': {subject_category: scope},
+                        'intra_extension_uuid': extension_uuid
+                    }
+                )
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                tmp_ref = ref.to_dict()
+                tmp_ref['subject_category_scope'].update({subject_category: scope})
+                session.delete(ref)
+                new_ref = SubjectCategoryScope.from_dict(tmp_ref)
+                session.add(new_ref)
+            return new_ref.to_dict()
+
+    def add_subject_category_scope_dict(self, extension_uuid, subject_category, scope_uuid, scope_name):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            scope = copy.deepcopy(old_ref["subject_category_scope"])
+            if subject_category not in scope.keys():
+                scope[subject_category] = dict()
+            scope[subject_category][scope_uuid] = scope_name
+            self.set_subject_category_scope_dict(extension_uuid, subject_category, scope[subject_category])
+            return {"subject_category_scope": {"uuid": scope_uuid, "name": scope_name}}
+
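+    # Scope rows hold a nested mapping {category_uuid: {scope_uuid: name}}
+    # per intra-extension, so add_...() reads the whole blob, grows one
+    # category's sub-dict and writes it back through set_...(). Sketch
+    # (uuids hypothetical):
+    #
+    #     driver.add_subject_category_scope_dict(
+    #         ie_uuid, role_category_uuid, scope_uuid, 'admin')
+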
+    def remove_subject_category_scope_dict(self, extension_uuid, subject_category, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            scope = dict(old_ref["subject_category_scope"])
+            if subject_category not in scope:
+                return
+            try:
+                scope[subject_category].pop(scope_uuid)
+            except KeyError:
+                return
+            new_ref = SubjectCategoryScope.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'subject_category_scope': scope,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in SubjectCategoryScope.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    # Getter and Setter for object_category_scope
+
+    def get_object_category_scope_dict(self, extension_uuid, object_category):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            result = copy.deepcopy(ref.to_dict())
+            if object_category not in result["object_category_scope"].keys():
+                raise CategoryNotFound()
+            result["object_category_scope"] = {object_category: result["object_category_scope"][object_category]}
+            return result
+
+    def set_object_category_scope_dict(self, extension_uuid, object_category, scope):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                new_ref = ObjectCategoryScope.from_dict(
+                    {
+                        "id": uuid4().hex,
+                        'object_category_scope': {object_category: scope},
+                        'intra_extension_uuid': extension_uuid
+                    }
+                )
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                tmp_ref = ref.to_dict()
+                tmp_ref['object_category_scope'].update({object_category: scope})
+                session.delete(ref)
+                new_ref = ObjectCategoryScope.from_dict(tmp_ref)
+                session.add(new_ref)
+            return new_ref.to_dict()
+
+    def add_object_category_scope_dict(self, extension_uuid, object_category, scope_uuid, scope_name):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            scope = dict(old_ref["object_category_scope"])
+            if object_category not in scope:
+                scope[object_category] = dict()
+            scope[object_category][scope_uuid] = scope_name
+            self.set_object_category_scope_dict(extension_uuid, object_category, scope[object_category])
+            return {"object_category_scope": {"uuid": scope_uuid, "name": scope_name}}
+
+    def remove_object_category_scope_dict(self, extension_uuid, object_category, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            scope = dict(old_ref["object_category_scope"])
+            if object_category not in scope:
+                return
+            try:
+                scope[object_category].pop(scope_uuid)
+            except KeyError:
+                return
+            new_ref = ObjectCategoryScope.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'object_category_scope': scope,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in ObjectCategoryScope.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    # Getter and Setter for action_category_scope
+    def get_action_category_scope_dict(self, extension_uuid, action_category):
+        with sql.transaction() as session:
+            query = session.query(ActionCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            result = copy.deepcopy(ref.to_dict())
+            if action_category not in result["action_category_scope"]:
+                raise CategoryNotFound("Unknown category id {}/{}".format(action_category, result["action_category_scope"].keys()))
+            result["action_category_scope"] = {action_category: result["action_category_scope"][action_category]}
+            return result
+
+    def set_action_category_scope_dict(self, extension_uuid, action_category, scope):
+        with sql.transaction() as session:
+            query = session.query(ActionCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                new_ref = ActionCategoryScope.from_dict(
+                    {
+                        "id": uuid4().hex,
+                        'action_category_scope': {action_category: scope},
+                        'intra_extension_uuid': extension_uuid
+                    }
+                )
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                tmp_ref = ref.to_dict()
+                tmp_ref['action_category_scope'].update({action_category: scope})
+                session.delete(ref)
+                new_ref = ActionCategoryScope.from_dict(tmp_ref)
+                session.add(new_ref)
+                # rebind so the returned dict reflects the row actually persisted
+                ref = new_ref
+            return ref.to_dict()
+
+    def add_action_category_scope_dict(self, extension_uuid, action_category, scope_uuid, scope_name):
+        with sql.transaction() as session:
+            query = session.query(ActionCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            scope = dict(old_ref["action_category_scope"])
+            if action_category not in scope:
+                scope[action_category] = dict()
+            scope[action_category][scope_uuid] = scope_name
+            self.set_action_category_scope_dict(extension_uuid, action_category, scope[action_category])
+            return {"action_category_scope": {"uuid": scope_uuid, "name": scope_name}}
+
+    def remove_action_category_scope_dict(self, extension_uuid, action_category, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(ActionCategoryScope)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            scope = dict(old_ref["action_category_scope"])
+            if action_category not in scope:
+                return
+            try:
+                scope[action_category].pop(scope_uuid)
+            except KeyError:
+                return
+            new_ref = ActionCategoryScope.from_dict(
+                {
+                    "id": old_ref["id"],
+                    'action_category_scope': scope,
+                    'intra_extension_uuid': old_ref["intra_extension_uuid"]
+                }
+            )
+            for attr in ActionCategoryScope.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    # Getter and Setter for subject_category_assignment
+
+    def get_subject_category_assignment_dict(self, extension_uuid, subject_uuid):
+        """ From a subject_uuid, return a dictionary of (category: scope for that subject)
+
+        :param extension_uuid: intra extension UUID
+        :param subject_uuid: subject UUID
+        :return: a dictionary of (keys are category nd values are scope for that subject)
+        """
+        with sql.transaction() as session:
+            query = session.query(SubjectCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound("get_subject_category_assignment_dict")
+            _ref = ref.to_dict()
+            if subject_uuid in _ref["subject_category_assignments"]:
+                _backup_dict = _ref["subject_category_assignments"][subject_uuid]
+                _ref["subject_category_assignments"] = dict()
+                _ref["subject_category_assignments"][subject_uuid] = _backup_dict
+            else:
+                _ref["subject_category_assignments"] = dict()
+                _ref["subject_category_assignments"][subject_uuid] = dict()
+            return _ref
+
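+    # NOTE: a sketch of the structure returned above (hypothetical UUIDs),
+    # inferred from add_subject_category_assignment_dict() below: each subject
+    # maps to categories, and each category to a list of scope UUIDs.
+    #
+    #     {
+    #         "id": "...",
+    #         "intra_extension_uuid": "ie_uuid...",
+    #         "subject_category_assignments": {
+    #             "subject_uuid...": {"category_uuid...": ["scope_uuid..."]}
+    #         }
+    #     }
+    #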
+    def set_subject_category_assignment_dict(self, extension_uuid, subject_uuid=None, assignment_dict=None):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if assignment_dict is None:
+                # avoid the shared mutable default argument pitfall
+                assignment_dict = {}
+            if type(assignment_dict) is not dict:
+                raise IntraExtensionError()
+            for value in assignment_dict.values():
+                if type(value) is not list:
+                    raise IntraExtensionError(str(value))
+            if not subject_uuid:
+                subject_category_assignments = {}
+            else:
+                subject_category_assignments = {subject_uuid: assignment_dict}
+            new_ref = SubjectCategoryAssignment.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'subject_category_assignments': subject_category_assignments,
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                new_ref.subject_category_assignments[subject_uuid] = assignment_dict
+                for attr in SubjectCategoryAssignment.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_subject_category_assignment_dict(self, extension_uuid, subject_uuid, category_uuid, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            assignments = ref.to_dict()['subject_category_assignments']
+            if subject_uuid not in assignments:
+                assignments[subject_uuid] = dict()
+            if category_uuid not in assignments[subject_uuid]:
+                assignments[subject_uuid][category_uuid] = list()
+            if scope_uuid not in assignments[subject_uuid][category_uuid]:
+                assignments[subject_uuid][category_uuid].append(scope_uuid)
+            return self.set_subject_category_assignment_dict(
+                extension_uuid,
+                subject_uuid,
+                assignments[subject_uuid])
+
+    def remove_subject_category_assignment(self, extension_uuid, subject_uuid, category_uuid, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(SubjectCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            if subject_uuid in old_ref["subject_category_assignments"]:
+                if category_uuid in old_ref["subject_category_assignments"][subject_uuid]:
+                    old_ref["subject_category_assignments"][subject_uuid][category_uuid].remove(scope_uuid)
+                    if not old_ref["subject_category_assignments"][subject_uuid][category_uuid]:
+                        old_ref["subject_category_assignments"][subject_uuid].pop(category_uuid)
+                    if not old_ref["subject_category_assignments"][subject_uuid]:
+                        old_ref["subject_category_assignments"].pop(subject_uuid)
+            try:
+                self.set_subject_category_assignment_dict(
+                    extension_uuid,
+                    subject_uuid,
+                    old_ref["subject_category_assignments"][subject_uuid])
+            except KeyError:
+                self.set_subject_category_assignment_dict(
+                    extension_uuid,
+                    subject_uuid,
+                    {})
+
+    # Getter and Setter for object_category_assignment
+
+    def get_object_category_assignment_dict(self, extension_uuid, object_uuid):
+        """ From a object_uuid, return a dictionary of (category: scope for that object)
+
+        :param extension_uuid: intra extension UUID
+        :param object_uuid: object UUID
+        :return: a dictionary of (keys are category nd values are scope for that object)
+        """
+        with sql.transaction() as session:
+            query = session.query(ObjectCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            _ref = ref.to_dict()
+            if object_uuid in _ref["object_category_assignments"]:
+                _backup_dict = _ref["object_category_assignments"][object_uuid]
+                _ref["object_category_assignments"] = dict()
+                _ref["object_category_assignments"][object_uuid] = _backup_dict
+            else:
+                _ref["object_category_assignments"] = dict()
+                _ref["object_category_assignments"][object_uuid] = dict()
+            return _ref
+
+    def set_object_category_assignment_dict(self, extension_uuid, object_uuid=None, assignment_dict=None):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if assignment_dict is None:
+                # avoid the shared mutable default argument pitfall
+                assignment_dict = {}
+            if type(assignment_dict) is not dict:
+                raise IntraExtensionError()
+            for value in assignment_dict.values():
+                if type(value) is not list:
+                    raise IntraExtensionError(str(value))
+            new_ref = ObjectCategoryAssignment.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'object_category_assignments': {object_uuid: assignment_dict},
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                new_ref.object_category_assignments[object_uuid] = assignment_dict
+                for attr in ObjectCategoryAssignment.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_object_category_assignment_dict(self, extension_uuid, object_uuid, category_uuid, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            assignments = ref.to_dict()['object_category_assignments']
+            if object_uuid not in assignments:
+                assignments[object_uuid] = dict()
+            if category_uuid not in assignments[object_uuid]:
+                assignments[object_uuid][category_uuid] = list()
+            if scope_uuid not in assignments[object_uuid][category_uuid]:
+                assignments[object_uuid][category_uuid].append(scope_uuid)
+            return self.set_object_category_assignment_dict(
+                extension_uuid,
+                object_uuid,
+                assignments[object_uuid])
+
+    def remove_object_category_assignment(self, extension_uuid, object_uuid, category_uuid, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(ObjectCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            if object_uuid in old_ref["object_category_assignments"]:
+                if category_uuid in old_ref["object_category_assignments"][object_uuid]:
+                    old_ref["object_category_assignments"][object_uuid][category_uuid].remove(scope_uuid)
+                    if not old_ref["object_category_assignments"][object_uuid][category_uuid]:
+                        old_ref["object_category_assignments"][object_uuid].pop(category_uuid)
+                    if not old_ref["object_category_assignments"][object_uuid]:
+                        old_ref["object_category_assignments"].pop(object_uuid)
+            try:
+                self.set_object_category_assignment_dict(
+                    extension_uuid,
+                    object_uuid,
+                    old_ref["object_category_assignments"][object_uuid])
+            except KeyError:
+                # the object entry may have been popped above when its last
+                # category was removed
+                self.set_object_category_assignment_dict(
+                    extension_uuid,
+                    object_uuid,
+                    {})
+
+    # Getter and Setter for action_category_assignment
+
+    def get_action_category_assignment_dict(self, extension_uuid, action_uuid):
+        """ From a action_uuid, return a dictionary of (category: scope for that action)
+
+        :param extension_uuid: intra extension UUID
+        :param action_uuid: action UUID
+        :return: a dictionary of (keys are category nd values are scope for that action)
+        """
+        with sql.transaction() as session:
+            query = session.query(ActionCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            _ref = ref.to_dict()
+            if action_uuid in _ref["action_category_assignments"]:
+                _backup_dict = _ref["action_category_assignments"][action_uuid]
+                _ref["action_category_assignments"] = dict()
+                _ref["action_category_assignments"][action_uuid] = _backup_dict
+            else:
+                _ref["action_category_assignments"] = dict()
+                _ref["action_category_assignments"][action_uuid] = dict()
+            return _ref
+
+    def set_action_category_assignment_dict(self, extension_uuid, action_uuid=None, assignment_dict=None):
+        with sql.transaction() as session:
+            query = session.query(ActionCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if assignment_dict is None:
+                # avoid the shared mutable default argument pitfall
+                assignment_dict = {}
+            if type(assignment_dict) is not dict:
+                raise IntraExtensionError()
+            for value in assignment_dict.values():
+                if type(value) is not list:
+                    raise IntraExtensionError(str(value))
+            new_ref = ActionCategoryAssignment.from_dict(
+                {
+                    "id": uuid4().hex,
+                    'action_category_assignments': {action_uuid: assignment_dict},
+                    'intra_extension_uuid': extension_uuid
+                }
+            )
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                new_ref.action_category_assignments[action_uuid] = assignment_dict
+                for attr in ActionCategoryAssignment.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    def add_action_category_assignment_dict(self, extension_uuid, action_uuid, category_uuid, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(ActionCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            assignments = ref.to_dict()['action_category_assignments']
+            if action_uuid not in assignments:
+                assignments[action_uuid] = dict()
+            if category_uuid not in assignments[action_uuid]:
+                assignments[action_uuid][category_uuid] = list()
+            if scope_uuid not in assignments[action_uuid][category_uuid]:
+                assignments[action_uuid][category_uuid].append(scope_uuid)
+            return self.set_action_category_assignment_dict(
+                extension_uuid,
+                action_uuid,
+                assignments[action_uuid])
+
+    def remove_action_category_assignment(self, extension_uuid, action_uuid, category_uuid, scope_uuid):
+        with sql.transaction() as session:
+            query = session.query(ActionCategoryAssignment)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            old_ref = ref.to_dict()
+            if action_uuid in old_ref["action_category_assignments"]:
+                if category_uuid in old_ref["action_category_assignments"][action_uuid]:
+                    old_ref["action_category_assignments"][action_uuid][category_uuid].remove(scope_uuid)
+                    if not old_ref["action_category_assignments"][action_uuid][category_uuid]:
+                        old_ref["action_category_assignments"][action_uuid].pop(category_uuid)
+                    if not old_ref["action_category_assignments"][action_uuid]:
+                        old_ref["action_category_assignments"].pop(action_uuid)
+            try:
+                self.set_action_category_assignment_dict(
+                    extension_uuid,
+                    action_uuid,
+                    old_ref["action_category_assignments"][action_uuid])
+            except KeyError:
+                # the action entry may have been popped above when its last
+                # category was removed
+                self.set_action_category_assignment_dict(
+                    extension_uuid,
+                    action_uuid,
+                    {})
+
+    # Getter and Setter for meta_rule
+
+    def get_meta_rule_dict(self, extension_uuid):
+        with sql.transaction() as session:
+            query = session.query(MetaRule)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            return ref.to_dict()
+
+    def set_meta_rule_dict(self, extension_uuid, meta_rule):
+        with sql.transaction() as session:
+            query = session.query(MetaRule)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            meta_rule["id"] = uuid4().hex
+            meta_rule["intra_extension_uuid"] = extension_uuid
+            new_ref = MetaRule.from_dict(meta_rule)
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                for attr in MetaRule.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
+    # Getter and Setter for rules
+
+    def get_rules(self, extension_uuid):
+        with sql.transaction() as session:
+            query = session.query(Rule)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            if not ref:
+                raise IntraExtensionNotFound()
+            return ref.to_dict()
+
+    def set_rules(self, extension_uuid, subrules):
+        with sql.transaction() as session:
+            query = session.query(Rule)
+            query = query.filter_by(intra_extension_uuid=extension_uuid)
+            ref = query.first()
+            rules = dict()
+            rules["id"] = uuid4().hex
+            rules["intra_extension_uuid"] = extension_uuid
+            rules["rules"] = subrules
+            new_ref = Rule.from_dict(rules)
+            if not ref:
+                session.add(new_ref)
+                ref = new_ref
+            else:
+                for attr in Rule.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_ref, attr))
+            return ref.to_dict()
+
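+    # NOTE: set_rules() replaces the whole rule set of an IntraExtension in a
+    # single row; a minimal sketch (hypothetical UUID, 'subrules' is an opaque,
+    # policy-specific structure):
+    #
+    #     driver.set_rules("ie_uuid...", subrules)
+    #     driver.get_rules("ie_uuid...")["rules"]  # -> subrules
+    #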
+
+class TenantConnector(TenantDriver):
+
+    def get_tenant_dict(self):
+        with sql.transaction() as session:
+            query = session.query(Tenant)
+            # query = query.filter_by(uuid=tenant_uuid)
+            # ref = query.first().to_dict()
+            tenants = query.all()
+            if not tenants:
+                raise TenantListEmptyError()
+            return {tenant.id: Tenant.to_dict(tenant) for tenant in tenants}
+            # return [Tenant.to_dict(tenant) for tenant in tenants]
+
+    def set_tenant_dict(self, tenant):
+        with sql.transaction() as session:
+            uuid = tenant.keys()[0]
+            query = session.query(Tenant)
+            query = query.filter_by(id=uuid)
+            ref = query.first()
+            if not ref:
+                # if not result, create the database line
+                ref = Tenant.from_dict(tenant)
+                session.add(ref)
+                return Tenant.to_dict(ref)
+            elif not tenant[uuid]["authz"] and not tenant[uuid]["admin"]:
+                # if admin and authz extensions are not set, delete the mapping
+                session.delete(ref)
+                return
+            elif tenant[uuid]["authz"] or tenant[uuid]["admin"]:
+                new_tenant = Tenant(
+                    id=uuid,
+                    name=tenant[uuid]["name"],
+                    authz=tenant[uuid]["authz"],
+                    admin=tenant[uuid]["admin"],
+                )
+                for attr in Tenant.attributes:
+                    if attr != 'id':
+                        setattr(ref, attr, getattr(new_tenant, attr))
+                return Tenant.to_dict(ref)
+            raise TenantError()
+
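+    # NOTE: the 'tenant' argument is the one-entry mapping built by
+    # TenantManager.set_tenant_dict() in core.py; a sketch with hypothetical
+    # UUIDs:
+    #
+    #     connector.set_tenant_dict({
+    #         "tenant_uuid...": {"name": "demo",
+    #                            "authz": "authz_ie_uuid...",
+    #                            "admin": "admin_ie_uuid..."}
+    #     })
+    #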
+
+# class InterExtension(sql.ModelBase, sql.DictBase):
+#     __tablename__ = 'inter_extension'
+#     attributes = [
+#         'id',
+#         'requesting_intra_extension_uuid',
+#         'requested_intra_extension_uuid',
+#         'virtual_entity_uuid',
+#         'genre',
+#         'description',
+#     ]
+#     id = sql.Column(sql.String(64), primary_key=True)
+#     requesting_intra_extension_uuid = sql.Column(sql.String(64))
+#     requested_intra_extension_uuid = sql.Column(sql.String(64))
+#     virtual_entity_uuid = sql.Column(sql.String(64))
+#     genre = sql.Column(sql.String(64))
+#     description = sql.Column(sql.Text())
+#
+#     @classmethod
+#     def from_dict(cls, d):
+#         """Override parent from_dict() method with a simpler implementation.
+#         """
+#         new_d = d.copy()
+#         return cls(**new_d)
+#
+#     def to_dict(self):
+#         """Override parent to_dict() method with a simpler implementation.
+#         """
+#         return dict(six.iteritems(self))
+#
+#
+# class InterExtensionConnector(InterExtensionDriver):
+#
+#     def get_inter_extensions(self):
+#         with sql.transaction() as session:
+#             query = session.query(InterExtension.id)
+#             interextensions = query.all()
+#             return [interextension.id for interextension in interextensions]
+#
+#     def create_inter_extensions(self, inter_id, inter_extension):
+#         with sql.transaction() as session:
+#             ie_ref = InterExtension.from_dict(inter_extension)
+#             session.add(ie_ref)
+#         return InterExtension.to_dict(ie_ref)
+#
+#     def get_inter_extension(self, uuid):
+#         with sql.transaction() as session:
+#             query = session.query(InterExtension)
+#             query = query.filter_by(id=uuid)
+#             ref = query.first()
+#             if not ref:
+#                 raise exception.NotFound
+#             return ref.to_dict()
+#
+#     def delete_inter_extensions(self, inter_extension_id):
+#         with sql.transaction() as session:
+#             ref = session.query(InterExtension).get(inter_extension_id)
+#             session.delete(ref)
+
diff --git a/keystone-moon/keystone/contrib/moon/controllers.py b/keystone-moon/keystone/contrib/moon/controllers.py
new file mode 100644 (file)
index 0000000..3c87da4
--- /dev/null
@@ -0,0 +1,611 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone import config
+from keystone.models import token_model
+from keystone import exception
+import os
+import glob
+from oslo_log import log
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('authz_api')
+class Authz_v3(controller.V3Controller):
+
+    def __init__(self):
+        super(Authz_v3, self).__init__()
+
+    @controller.protected()
+    def get_authz(self, context, tenant_id, subject_id, object_id, action_id):
+        # TODO (dthom): build the authz functionality
+        try:
+            _authz = self.authz_api.authz(tenant_id, subject_id, object_id, action_id)
+        except exception.NotFound:
+            _authz = True
+        except Exception:
+            _authz = False
+        return {"authz": _authz,
+                "tenant_id": tenant_id,
+                "subject_id": subject_id,
+                "object_id": object_id,
+                "action_id": action_id}
+
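+    # NOTE: a sketch of the JSON body returned by get_authz (hypothetical
+    # values):
+    #
+    #     {"authz": true, "tenant_id": "...", "subject_id": "user1",
+    #      "object_id": "vm1", "action_id": "boot"}
+    #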
+
+@dependency.requires('admin_api', 'authz_api', 'token_provider_api')
+class IntraExtensions(controller.V3Controller):
+    collection_name = 'intra_extensions'
+    member_name = 'intra_extension'
+
+    def __init__(self):
+        super(IntraExtensions, self).__init__()
+
+    def _get_user_from_token(self, token_id):
+        response = self.token_provider_api.validate_token(token_id)
+        token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response)
+        return token_ref['user']
+
+    # IntraExtension functions
+    @controller.protected()
+    def get_intra_extensions(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        return {
+            "intra_extensions":
+                self.admin_api.get_intra_extension_list()
+        }
+
+    @controller.protected()
+    def get_intra_extension(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        return {
+            "intra_extensions":
+                self.admin_api.get_intra_extension(uuid=kw['intra_extensions_id'])
+        }
+
+    @controller.protected()
+    def create_intra_extension(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        return self.admin_api.load_intra_extension(kw)
+
+    @controller.protected()
+    def delete_intra_extension(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        if "intra_extensions_id" not in kw:
+            raise exception.Error
+        return self.admin_api.delete_intra_extension(kw["intra_extensions_id"])
+
+    # Perimeter functions
+    @controller.protected()
+    def get_subjects(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_subject_dict(user, ie_uuid)
+
+    @controller.protected()
+    def add_subject(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject = kw["subject_id"]
+        return self.admin_api.add_subject_dict(user, ie_uuid, subject)
+
+    @controller.protected()
+    def del_subject(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject = kw["subject_id"]
+        return self.admin_api.del_subject(user, ie_uuid, subject)
+
+    @controller.protected()
+    def get_objects(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_object_dict(user, ie_uuid)
+
+    @controller.protected()
+    def add_object(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_id = kw["object_id"]
+        return self.admin_api.add_object_dict(user, ie_uuid, object_id)
+
+    @controller.protected()
+    def del_object(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_id = kw["object_id"]
+        return self.admin_api.del_object(user, ie_uuid, object_id)
+
+    @controller.protected()
+    def get_actions(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_action_dict(user, ie_uuid)
+
+    @controller.protected()
+    def add_action(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action = kw["action_id"]
+        return self.admin_api.add_action_dict(user, ie_uuid, action)
+
+    @controller.protected()
+    def del_action(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action = kw["action_id"]
+        return self.admin_api.del_action(user, ie_uuid, action)
+
+    # Metadata functions
+    @controller.protected()
+    def get_subject_categories(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_subject_category_dict(user, ie_uuid)
+
+    @controller.protected()
+    def add_subject_category(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject_category = kw["subject_category_id"]
+        return self.admin_api.add_subject_category_dict(user, ie_uuid, subject_category)
+
+    @controller.protected()
+    def del_subject_category(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject_category = kw["subject_category_id"]
+        return self.admin_api.del_subject_category(user, ie_uuid, subject_category)
+
+    @controller.protected()
+    def get_object_categories(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_object_category_dict(user, ie_uuid)
+
+    @controller.protected()
+    def add_object_category(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_category = kw["object_category_id"]
+        return self.admin_api.add_object_category_dict(user, ie_uuid, object_category)
+
+    @controller.protected()
+    def del_object_category(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_category = kw["object_category_id"]
+        return self.admin_api.del_object_category(user, ie_uuid, object_category)
+
+    @controller.protected()
+    def get_action_categories(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_action_category_dict(user, ie_uuid)
+
+    @controller.protected()
+    def add_action_category(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action_category = kw["action_category_id"]
+        return self.admin_api.add_action_category_dict(user, ie_uuid, action_category)
+
+    @controller.protected()
+    def del_action_category(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action_category = kw["action_category_id"]
+        return self.admin_api.del_action_category(user, ie_uuid, action_category)
+
+    # Scope functions
+    @controller.protected()
+    def get_subject_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        category_id = kw["subject_category_id"]
+        return self.admin_api.get_subject_category_scope_dict(user, ie_uuid, category_id)
+
+    @controller.protected()
+    def add_subject_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject_category = kw["subject_category_id"]
+        subject_category_scope = kw["subject_category_scope_id"]
+        return self.admin_api.add_subject_category_scope_dict(
+                    user,
+                    ie_uuid,
+                    subject_category,
+                    subject_category_scope)
+
+    @controller.protected()
+    def del_subject_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject_category = kw["subject_category_id"]
+        subject_category_scope = kw["subject_category_scope_id"]
+        return self.admin_api.del_subject_category_scope(
+                    user,
+                    ie_uuid,
+                    subject_category,
+                    subject_category_scope)
+
+    @controller.protected()
+    def get_object_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        category_id = kw["object_category_id"]
+        return self.admin_api.get_object_category_scope_dict(user, ie_uuid, category_id)
+
+    @controller.protected()
+    def add_object_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_category = kw["object_category_id"]
+        object_category_scope = kw["object_category_scope_id"]
+        return self.admin_api.add_object_category_scope_dict(
+                    user,
+                    ie_uuid,
+                    object_category,
+                    object_category_scope)
+
+    @controller.protected()
+    def del_object_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_category = kw["object_category_id"]
+        object_category_scope = kw["object_category_scope_id"]
+        return self.admin_api.del_object_category_scope(
+                    user,
+                    ie_uuid,
+                    object_category,
+                    object_category_scope)
+
+    @controller.protected()
+    def get_action_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        category_id = kw["action_category_id"]
+        return self.admin_api.get_action_category_scope_dict(user, ie_uuid, category_id)
+
+    @controller.protected()
+    def add_action_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action_category = kw["action_category_id"]
+        action_category_scope = kw["action_category_scope_id"]
+        return self.admin_api.add_action_category_scope_dict(
+                    user,
+                    ie_uuid,
+                    action_category,
+                    action_category_scope)
+
+    @controller.protected()
+    def del_action_category_scope(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action_category = kw["action_category_id"]
+        action_category_scope = kw["action_category_scope_id"]
+        return self.admin_api.del_action_category_scope(
+                    user,
+                    ie_uuid,
+                    action_category,
+                    action_category_scope)
+
+    # Assignment functions
+    @controller.protected()
+    def get_subject_assignments(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject_id = kw["subject_id"]
+        return self.admin_api.get_subject_category_assignment_dict(user, ie_uuid, subject_id)
+
+    @controller.protected()
+    def add_subject_assignment(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject_id = kw["subject_id"]
+        subject_category = kw["subject_category"]
+        subject_category_scope = kw["subject_category_scope"]
+        return self.admin_api.add_subject_category_assignment_dict(
+                    user,
+                    ie_uuid,
+                    subject_id,
+                    subject_category,
+                    subject_category_scope)
+
+    @controller.protected()
+    def del_subject_assignment(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        subject_id = kw["subject_id"]
+        subject_category = kw["subject_category"]
+        subject_category_scope = kw["subject_category_scope"]
+        return self.admin_api.del_subject_category_assignment(
+                    user,
+                    ie_uuid,
+                    subject_id,
+                    subject_category,
+                    subject_category_scope)
+
+    @controller.protected()
+    def get_object_assignments(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_id = kw["object_id"]
+        return self.admin_api.get_object_category_assignment_dict(user, ie_uuid, object_id)
+
+    @controller.protected()
+    def add_object_assignment(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_id = kw["object_id"]
+        object_category = kw["object_category"]
+        object_category_scope = kw["object_category_scope"]
+        return self.admin_api.add_object_category_assignment_dict(
+                    user,
+                    ie_uuid,
+                    object_id,
+                    object_category,
+                    object_category_scope)
+
+    @controller.protected()
+    def del_object_assignment(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        object_id = kw["object_id"]
+        object_category = kw["object_category"]
+        object_category_scope = kw["object_category_scope"]
+        return self.admin_api.del_object_category_assignment(
+                    user,
+                    ie_uuid,
+                    object_id,
+                    object_category,
+                    object_category_scope)
+
+    @controller.protected()
+    def get_action_assignments(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action_id = kw["action_id"]
+        return self.admin_api.get_action_category_assignment_dict(user, ie_uuid, action_id)
+
+    @controller.protected()
+    def add_action_assignment(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action_id = kw["action_id"]
+        action_category = kw["action_category"]
+        action_category_scope = kw["action_category_scope"]
+        return self.admin_api.add_action_category_assignment_dict(
+                    user,
+                    ie_uuid,
+                    action_id,
+                    action_category,
+                    action_category_scope)
+
+    @controller.protected()
+    def del_action_assignment(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        action_id = kw["action_id"]
+        action_category = kw["action_category"]
+        action_category_scope = kw["action_category_scope"]
+        return self.admin_api.del_action_category_assignment(
+                    user,
+                    ie_uuid,
+                    action_id,
+                    action_category,
+                    action_category_scope)
+
+    # Metarule functions
+    @controller.protected()
+    def get_aggregation_algorithms(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_aggregation_algorithms(user, ie_uuid)
+
+    @controller.protected()
+    def get_aggregation_algorithm(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_aggregation_algorithm(user, ie_uuid)
+
+    @controller.protected()
+    def set_aggregation_algorithm(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        aggregation_algorithm = kw["aggregation_algorithm"]
+        return self.admin_api.set_aggregation_algorithm(user, ie_uuid, aggregation_algorithm)
+
+    @controller.protected()
+    def get_sub_meta_rule(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_sub_meta_rule(user, ie_uuid)
+
+    @controller.protected()
+    def set_sub_meta_rule(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw.pop("intra_extensions_id")
+        # subject_categories = kw["subject_categories"]
+        # action_categories = kw["action_categories"]
+        # object_categories = kw["object_categories"]
+        # relation = kw["relation"]
+        # aggregation_algorithm = kw["aggregation_algorithm"]
+        return self.admin_api.set_sub_meta_rule(
+                    user,
+                    ie_uuid,
+                    kw)
+
+    @controller.protected()
+    def get_sub_meta_rule_relations(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_sub_meta_rule_relations(user, ie_uuid)
+
+    # Rules functions
+    @controller.protected()
+    def get_sub_rules(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        return self.admin_api.get_sub_rules(user, ie_uuid)
+
+    @controller.protected()
+    def set_sub_rule(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        sub_rule = kw["rule"]
+        relation = kw["relation"]
+        return self.admin_api.set_sub_rule(user, ie_uuid, relation, sub_rule)
+
+    @controller.protected()
+    def del_sub_rule(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        ie_uuid = kw["intra_extensions_id"]
+        relation_name = kw["relation_name"]
+        rule = kw["rule"]
+        return self.admin_api.del_sub_rule(
+                    user,
+                    ie_uuid,
+                    relation_name,
+                    rule)
+
+
+class AuthzPolicies(controller.V3Controller):
+    collection_name = 'authz_policies'
+    member_name = 'authz_policy'
+
+    def __init__(self):
+        super(AuthzPolicies, self).__init__()
+
+    @controller.protected()
+    def get_authz_policies(self, context, **kw):
+        nodes = glob.glob(os.path.join(CONF.moon.policy_directory, "*"))
+        return {
+            "authz_policies":
+                [os.path.basename(n) for n in nodes if os.path.isdir(n)]
+        }
+
+
+@dependency.requires('tenant_api', 'resource_api', 'token_provider_api')
+class Tenants(controller.V3Controller):
+
+    def __init__(self):
+        super(Tenants, self).__init__()
+
+    def _get_user_from_token(self, token_id):
+        response = self.token_provider_api.validate_token(token_id)
+        token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response)
+        return token_ref['user']
+
+    @controller.protected()
+    def get_tenants(self, context, **kw):
+        # user = self._get_user_from_token(context["token_id"])
+        return {
+            "tenants":
+                self.tenant_api.get_tenant_dict()
+        }
+
+    @controller.protected()
+    def get_tenant(self, context, **kw):
+        # user = self._get_user_from_token(context["token_id"])
+        tenant_uuid = kw.get("tenant_uuid")
+        return {
+            "tenant":
+                self.tenant_api.get_tenant_dict()[tenant_uuid]
+        }
+
+    @controller.protected()
+    def set_tenant(self, context, **kw):
+        # user = self._get_user_from_token(context["token_id"])
+        tenant_uuid = kw.get("id")
+        name = self.resource_api.get_project(tenant_uuid)["name"]
+        authz = kw.get("authz")
+        admin = kw.get("admin")
+        self.tenant_api.set_tenant_dict(tenant_uuid, name, authz, admin)
+        return {
+            "tenant":
+                self.tenant_api.get_tenant_dict()[tenant_uuid]
+        }
+
+    @controller.protected()
+    def delete_tenant(self, context, **kw):
+        # user = self._get_user_from_token(context["token_id"])
+        tenant_uuid = kw.get("tenant_uuid")
+        self.tenant_api.set_tenant_dict(tenant_uuid, None, None, None)
+
+
+@dependency.requires('authz_api')
+class InterExtensions(controller.V3Controller):
+
+    def __init__(self):
+        super(InterExtensions, self).__init__()
+
+    def _get_user_from_token(self, token_id):
+        response = self.token_provider_api.validate_token(token_id)
+        token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response)
+        return token_ref['user']
+
+    # @controller.protected()
+    # def get_inter_extensions(self, context, **kw):
+    #     user = self._get_user_from_token(context["token_id"])
+    #     return {
+    #         "inter_extensions":
+    #             self.interextension_api.get_inter_extensions()
+    #     }
+
+    # @controller.protected()
+    # def get_inter_extension(self, context, **kw):
+    #     user = self._get_user_from_token(context["token_id"])
+    #     return {
+    #         "inter_extensions":
+    #             self.interextension_api.get_inter_extension(uuid=kw['inter_extensions_id'])
+    #     }
+
+    # @controller.protected()
+    # def create_inter_extension(self, context, **kw):
+    #     user = self._get_user_from_token(context["token_id"])
+    #     return self.interextension_api.create_inter_extension(kw)
+
+    # @controller.protected()
+    # def delete_inter_extension(self, context, **kw):
+    #     user = self._get_user_from_token(context["token_id"])
+    #     if "inter_extensions_id" not in kw:
+    #         raise exception.Error
+    #     return self.interextension_api.delete_inter_extension(kw["inter_extensions_id"])
+
+
+@dependency.requires('authz_api')
+class SuperExtensions(controller.V3Controller):
+
+    def __init__(self):
+        super(SuperExtensions, self).__init__()
+
+
+@dependency.requires('moonlog_api', 'authz_api', 'token_provider_api')
+class Logs(controller.V3Controller):
+
+    def __init__(self):
+        super(Logs, self).__init__()
+
+    def _get_user_from_token(self, token_id):
+        response = self.token_provider_api.validate_token(token_id)
+        token_ref = token_model.KeystoneToken(token_id=token_id, token_data=response)
+        return token_ref['user']
+
+    @controller.protected()
+    def get_logs(self, context, **kw):
+        user = self._get_user_from_token(context["token_id"])
+        options = kw.get("options", "")
+        # FIXME (dthom): the authorization for get_logs must be done with an intra_extension
+        #if self.authz_api.admin(user["name"], "logs", "read"):
+        return {
+            "logs":
+                self.moonlog_api.get_logs(options)
+        }
+
diff --git a/keystone-moon/keystone/contrib/moon/core.py b/keystone-moon/keystone/contrib/moon/core.py
new file mode 100644 (file)
index 0000000..1dc23c4
--- /dev/null
@@ -0,0 +1,2375 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+from uuid import uuid4
+import os
+import json
+import copy
+import re
+import six
+
+from keystone.common import manager
+from keystone import config
+from oslo_log import log
+from keystone.common import dependency
+from keystone import exception
+from oslo_config import cfg
+from keystone.i18n import _
+
+from keystone.contrib.moon.exception import *
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+_OPTS = [
+    cfg.StrOpt('authz_driver',
+               default='keystone.contrib.moon.backends.flat.SuperExtensionConnector',
+               help='Authorisation backend driver.'),
+    cfg.StrOpt('log_driver',
+               default='keystone.contrib.moon.backends.flat.LogConnector',
+               help='Logs backend driver.'),
+    cfg.StrOpt('superextension_driver',
+               default='keystone.contrib.moon.backends.flat.SuperExtensionConnector',
+               help='SuperExtension backend driver.'),
+    cfg.StrOpt('intraextension_driver',
+               default='keystone.contrib.moon.backends.sql.IntraExtensionConnector',
+               help='IntraExtension backend driver.'),
+    cfg.StrOpt('tenant_driver',
+               default='keystone.contrib.moon.backends.sql.TenantConnector',
+               help='Tenant backend driver.'),
+    cfg.StrOpt('interextension_driver',
+               default='keystone.contrib.moon.backends.sql.InterExtensionConnector',
+               help='InterExtension backend driver.'),
+    cfg.StrOpt('policy_directory',
+               default='/etc/keystone/policies',
+               help='Local directory where all policies are stored.'),
+    cfg.StrOpt('super_extension_directory',
+               default='/etc/keystone/super_extension',
+               help='Local directory where SuperExtension configuration is stored.'),
+]
+CONF.register_opts(_OPTS, group='moon')
+
+
+def filter_args(func):
+    def wrapped(*args, **kwargs):
+        _args = []
+        for arg in args:
+            if type(arg) in (unicode, str):
+                arg = "".join(re.findall("[\w\-+]*", arg))
+            _args.append(arg)
+        for arg in kwargs:
+            if type(kwargs[arg]) in (unicode, str):
+                kwargs[arg] = "".join(re.findall("[\w\-+]*", kwargs[arg]))
+        return func(*_args, **kwargs)
+    return wrapped
+
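+# NOTE: filter_args strips every character outside [A-Za-z0-9_+-] from string
+# arguments before calling the wrapped function; a minimal sketch:
+#
+#     @filter_args
+#     def echo(value):
+#         return value
+#
+#     echo("admin; DROP TABLE")  # -> "adminDROPTABLE"
+#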
+
+def enforce(actions, object, **extra):
+    def wrap(func):
+        def wrapped(*args):
+            self = args[0]
+            user_name = args[1]
+            intra_extension_uuid = args[2]
+            _admin_extension_uuid = self.tenant_api.get_admin_extension_uuid(args[2])
+            # func.func_globals["_admin_extension_uuid"] = _admin_extension_uuid
+            if not _admin_extension_uuid:
+                self.moonlog_api.warning("No admin IntraExtension found, authorization granted by default.")
+                return func(*args)
+            else:
+                # 'actions' is a closure variable of enforce(); rebind it to a
+                # local instead of declaring it global, which would raise
+                # NameError at call time
+                _actions = actions
+                _authz = False
+                if type(_actions) in (str, unicode):
+                    _actions = (_actions, )
+                for action in _actions:
+                    if self.authz_api.authz(
+                            intra_extension_uuid,
+                            user_name,
+                            object,
+                            action):
+                        _authz = True
+                    else:
+                        _authz = False
+                        break
+                if _authz:
+                    return func(*args)
+        return wrapped
+    return wrap
+
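+# NOTE: a hypothetical decoration sketch; enforce() assumes the wrapped method
+# receives (self, user_name, intra_extension_uuid, ...) positionally and checks
+# every listed action on the given object against the admin IntraExtension:
+#
+#     @enforce(("read", "write"), "subject_category")
+#     def set_subject_category_dict(self, user_name, extension_uuid, category):
+#         ...
+#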
+
+def filter_input(data):
+    if type(data) not in (str, unicode):
+        return data
+    try:
+        return "".join(re.findall("[\w\-+*]", data))
+    except TypeError:
+        LOG.error("Error in filtering input data: {}".format(data))
+
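+# NOTE: unlike filter_args, filter_input also lets '*' through; a sketch:
+#
+#     filter_input("role:admin*")  # -> "roleadmin*"
+#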
+
+@dependency.provider('moonlog_api')
+class LogManager(manager.Manager):
+
+    def __init__(self):
+        driver = CONF.moon.log_driver
+        super(LogManager, self).__init__(driver)
+
+    def get_logs(self, options):
+        return self.driver.get_logs(options)
+
+    def authz(self, message):
+        return self.driver.authz(message)
+
+    def debug(self, message):
+        return self.driver.debug(message)
+
+    def info(self, message):
+        return self.driver.info(message)
+
+    def warning(self, message):
+        return self.driver.warning(message)
+
+    def error(self, message):
+        return self.driver.error(message)
+
+    def critical(self, message):
+        return self.driver.critical(message)
+
+
+@dependency.provider('tenant_api')
+@dependency.requires('moonlog_api')
+class TenantManager(manager.Manager):
+
+    def __init__(self):
+        super(TenantManager, self).__init__(CONF.moon.tenant_driver)
+
+    def get_tenant_dict(self):
+        """
+        Return a dictionary with all tenants.
+        :return: dict
+        """
+        try:
+            return self.driver.get_tenant_dict()
+        except TenantListEmptyError:
+            self.moonlog_api.error(_("Tenant Mapping list is empty."))
+            return {}
+
+    def get_tenant_name(self, tenant_uuid):
+        _tenant_dict = self.get_tenant_dict()
+        if tenant_uuid not in _tenant_dict:
+            raise TenantNotFoundError(_("Tenant UUID ({}) was not found.".format(tenant_uuid)))
+        return _tenant_dict[tenant_uuid]["name"]
+
+    def set_tenant_name(self, tenant_uuid, tenant_name):
+        _tenant_dict = self.get_tenant_dict()
+        if tenant_uuid not in _tenant_dict:
+            raise TenantNotFoundError(_("Tenant UUID ({}) was not found.".format(tenant_uuid)))
+        _tenant_dict[tenant_uuid]['name'] = tenant_name
+        return self.driver.set_tenant_dict(_tenant_dict)
+
+    def get_extension_uuid(self, tenant_uuid, scope="authz"):
+        """
+        Return the UUID of the scoped extension for a particular tenant.
+        :param tenant_uuid: UUID of the tenant
+        :param scope: "admin" or "authz"
+        :return (str): the UUID of the scoped extension
+        """
+        # 1 tenant only with 1 authz extension and 1 admin extension
+        _tenant_dict = self.get_tenant_dict()
+        if tenant_uuid not in _tenant_dict:
+            raise TenantNotFoundError(_("Tenant UUID ({}) was not found.".format(tenant_uuid)))
+        if not _tenant_dict[tenant_uuid][scope]:
+            raise IntraExtensionNotFound(_("No IntraExtension found for Tenant {}.".format(tenant_uuid)))
+        return _tenant_dict[tenant_uuid][scope]
+
+    def get_tenant_uuid(self, extension_uuid):
+        for _tenant_uuid, _tenant_value in six.iteritems(self.get_tenant_dict()):
+            if extension_uuid == _tenant_value["authz"] or extension_uuid == _tenant_value["admin"]:
+                return _tenant_uuid
+        raise TenantNotFoundError()
+
+    def get_admin_extension_uuid(self, authz_extension_uuid):
+        _tenants = self.get_tenant_dict()
+        for _tenant_uuid in _tenants:
+            if authz_extension_uuid == _tenants[_tenant_uuid]['authz'] and _tenants[_tenant_uuid]['admin']:
+                return _tenants[_tenant_uuid]['admin']
+        self.moonlog_api.error(_("No IntraExtension found mapping this Authz IntraExtension: {}.".format(
+                               authz_extension_uuid)))
+        # FIXME (dthom): if AdminIntraExtensionNotFound, maybe we can add an option in configuration file
+        # to allow or not the fact that Admin IntraExtension can be None
+        # raise AdminIntraExtensionNotFound()
+
+    def delete(self, authz_extension_uuid):
+        _tenants = self.get_tenant_dict()
+        for _tenant_uuid in _tenants:
+            if authz_extension_uuid == _tenants[_tenant_uuid]['authz']:
+                return self.set_tenant_dict(_tenant_uuid, "", "", "")
+        raise AuthzIntraExtensionNotFound(_("No IntraExtension found mapping this Authz IntraExtension: {}.").format(
+            authz_extension_uuid))
+
+    def set_tenant_dict(self, tenant_uuid, name, authz_extension_uuid, admin_extension_uuid):
+        tenant = {
+            tenant_uuid: {
+                "name": name,
+                "authz": authz_extension_uuid,
+                "admin": admin_extension_uuid
+            }
+        }
+        # TODO (dthom): Tenant must be checked against Keystone database.
+        return self.driver.set_tenant_dict(tenant)
+
+
+class TenantDriver(object):
+
+    def get_tenant_dict(self):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_tenant_dict(self, tenant):
+        raise exception.NotImplemented()  # pragma: no cover
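+
+# NOTE: a minimal in-memory sketch of a TenantDriver, for illustration only (real
+# backends live in the configured driver module). It assumes the mapping structure
+# built by TenantManager.set_tenant_dict above:
+# {tenant_uuid: {"name": ..., "authz": authz_extension_uuid, "admin": admin_extension_uuid}}
+#
+#     class DummyTenantDriver(TenantDriver):
+#
+#         def __init__(self):
+#             self._tenants = {}
+#
+#         def get_tenant_dict(self):
+#             return dict(self._tenants)
+#
+#         def set_tenant_dict(self, tenant):
+#             self._tenants.update(tenant)
+#             return self._tenants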
+
+
+@dependency.requires('identity_api', 'moonlog_api', 'tenant_api', 'authz_api')
+class IntraExtensionManager(manager.Manager):
+
+    __genre__ = None
+
+    def __init__(self):
+        driver = CONF.moon.intraextension_driver
+        super(IntraExtensionManager, self).__init__(driver)
+
+    def authz(self, uuid, sub, obj, act):
+        """Check authorization for a particular action.
+
+        :param uuid: UUID of an IntraExtension
+        :param sub: subject of the request
+        :param obj: object of the request
+        :param act: action of the request
+        :return: True if the request is authorized, False otherwise (may raise an exception)
+        """
+        if not self.driver.get_intra_extension(uuid):
+            raise IntraExtensionNotFound()
+        # self.moonlog_api.authz("Unknown: Authorization framework disabled ({} {} {} {})".format(uuid, sub, obj, act))
+        # self.moonlog_api.warning("Unknown: Authorization framework disabled ({} {} {} {})".format(uuid, sub, obj, act))
+        # return True
+        # #TODO (dthom): must raise IntraExtensionNotAuthorized
+        # try:
+        #     _subject_category_dict = self.driver.get_subject_category_dict(extension_uuid)
+        #     _object_category_dict = self.driver.get_object_category_dict(extension_uuid)
+        #     _action_category_dict = self.driver.get_action_category_dict(extension_uuid)
+        #     _subject_category_value_dict = self.driver.get_subject_category_value_dict(extension_uuid, subject_name)
+        #     _object_category_value_dict = self.driver.get_object_category_value_dict(extension_uuid, object_name)
+        #     _action_category_value_dict = self.driver.get_action_category_value_dict(extension_uuid, action_name)
+        #     _meta_rule = self.driver.get_meta_rule(extension_uuid)
+        #     _rules = self.driver.get_rules(extension_uuid)
+        #     # TODO: algorithm to validate requests
+        #     return True
+        # except exception:  # TODO: exception.IntraExtension.NotAuthorized
+        #     pass
+        sub_meta_rule = self.driver.get_meta_rule(uuid)
+        subject_assignments = self.driver.get_subject_category_assignment_dict(uuid)
+        action_assignments = self.driver.get_action_category_assignment_dict(uuid)
+        object_assignments = self.driver.get_object_category_assignment_dict(uuid)
+        # check if subject exists
+        if sub not in self.driver.get_subject_dict(uuid):
+            self.moonlog_api.authz("KO: Subject {} unknown".format(sub))
+            return False
+        # check if object exists
+        if obj not in self.driver.get_object_dict(uuid):
+            self.moonlog_api.authz("KO: Object {} unknown".format(obj))
+            return False
+        # check if action exists
+        if act not in self.driver.get_action_dict(uuid):
+            self.moonlog_api.authz("KO: Action {} unknown".format(act))
+            return False
+        # check if subject is in subject_assignment
+        for cat in subject_assignments.keys():
+            if sub in subject_assignments[cat]:
+                break
+        else:
+            self.moonlog_api.authz("KO: Subject no found in categories {}".format(
+                subject_assignments.keys()))
+            return False
+        # check if object is in object_assignment
+        for cat in object_assignments.keys():
+            if obj in object_assignments[cat]:
+                break
+        else:
+            self.moonlog_api.authz("KO: Object no found in categories {}".format(
+                object_assignments))
+            return False
+        # check if action is in action_assignment
+        for cat in action_assignments.keys():
+            if act in action_assignments[cat]:
+                break
+        else:
+            self.moonlog_api.authz("KO: Action no found in categories {}".format(
+                action_assignments.keys()))
+            return False
+        # get all rules for intra_extension
+        rules = self.driver.get_rules(uuid)
+        # check if relation exists in rules
+        relation_to_check = None
+        relations = self.driver.get_sub_meta_rule_relations(uuid)
+        for relation in rules:
+            if relation in relations:
+                # hypothesis: only one relation to check
+                relation_to_check = relation
+                break
+        else:
+            self.moonlog_api.authz("KO: No relation can be used {}".format(rules.keys()))
+            return False
+        for sub_rule in rules[relation_to_check]:
+            for cat in sub_meta_rule[relation_to_check]["subject_categories"]:
+                rule_scope = sub_rule.pop(0)
+                if rule_scope in subject_assignments[cat][sub]:
+                    break
+            else:
+                continue
+            for cat in sub_meta_rule[relation_to_check]["action_categories"]:
+                rule_scope = sub_rule.pop(0)
+                if rule_scope in action_assignments[cat][act]:
+                    break
+            else:
+                continue
+            for cat in sub_meta_rule[relation_to_check]["object_categories"]:
+                rule_scope = sub_rule.pop(0)
+                if rule_scope in object_assignments[cat][obj]:
+                    break
+            else:
+                continue
+            self.moonlog_api.authz("OK ({} {},{},{})".format(uuid, sub, act, obj))
+            return True
+        self.moonlog_api.authz("KO ({} {},{},{})".format(uuid, sub, act, obj))
+        return False
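+
+    # NOTE: a hedged usage sketch of the check above, with hypothetical names and a
+    # hypothetical UUID; the driver must already hold the perimeter, the assignments,
+    # the meta-rule and the rules for this IntraExtension:
+    #
+    #     ie_uuid = "0123456789abcdef0123456789abcdef"  # hypothetical
+    #     if intra_extension_manager.authz(ie_uuid, "admin", "servers", "list"):
+    #         pass  # request allowed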
+
+    def __get_key_from_value(self, value, values_dict):
+        return next(k for k, v in six.iteritems(values_dict) if v == value)
+
+    def get_intra_extension_list(self):
+        # TODO: check will be done through super_extension later
+        return self.driver.get_intra_extension_list()
+
+    def get_intra_extension_id_for_tenant(self, tenant_id):
+        for intra_extension_id in self.driver.get_intra_extension_list():
+            if self.driver.get_intra_extension(intra_extension_id)["tenant"] == tenant_id:
+                return intra_extension_id
+        LOG.error("IntraExtension not found for tenant {}".format(tenant_id))
+        raise exception.NotFound
+
+    def get_intra_extension(self, uuid):
+        return self.driver.get_intra_extension(uuid)
+
+    def set_perimeter_values(self, ie, policy_dir):
+
+        perimeter_path = os.path.join(policy_dir, 'perimeter.json')
+        with open(perimeter_path) as f:
+            json_perimeter = json.load(f)
+
+        subject_dict = dict()
+        # We assume that every subject maps to an existing user in Keystone
+        for _subject in json_perimeter['subjects']:
+            user = self.identity_api.get_user_by_name(_subject, "default")
+            subject_dict[user["id"]] = user["name"]
+        self.driver.set_subject_dict(ie["id"], subject_dict)
+        ie["subjects"] = subject_dict
+
+        # Generate a UUID for each object and each action
+        object_dict = dict()
+        for _object in json_perimeter['objects']:
+            object_dict[uuid4().hex] = _object
+        self.driver.set_object_dict(ie["id"], object_dict)
+        ie["objects"] = object_dict
+
+        action_dict = dict()
+        for _action in json_perimeter['actions']:
+            action_dict[uuid4().hex] = _action
+        self.driver.set_action_dict(ie["id"], action_dict)
+        ie["ations"] = action_dict
+
+    def set_metadata_values(self, ie, policy_dir):
+
+        metadata_path = os.path.join(policy_dir, 'metadata.json')
+        with open(metadata_path) as f:
+            json_metadata = json.load(f)
+
+        subject_categories_dict = dict()
+        for _cat in json_metadata['subject_categories']:
+            subject_categories_dict[uuid4().hex] = _cat
+        self.driver.set_subject_category_dict(ie["id"], subject_categories_dict)
+        # Initialize scope categories
+        for _cat in subject_categories_dict.keys():
+            self.driver.set_subject_category_scope_dict(ie["id"], _cat, {})
+        ie['subject_categories'] = subject_categories_dict
+
+        object_categories_dict = dict()
+        for _cat in json_metadata['object_categories']:
+            object_categories_dict[uuid4().hex] = _cat
+        self.driver.set_object_category_dict(ie["id"], object_categories_dict)
+        # Initialize scope categories
+        for _cat in object_categories_dict.keys():
+            self.driver.set_object_category_scope_dict(ie["id"], _cat, {})
+        ie['object_categories'] = object_categories_dict
+
+        action_categories_dict = dict()
+        for _cat in json_metadata['action_categories']:
+            action_categories_dict[uuid4().hex] = _cat
+        self.driver.set_action_category_dict(ie["id"], action_categories_dict)
+        # Initialize scope categories
+        for _cat in action_categories_dict.keys():
+            self.driver.set_action_category_scope_dict(ie["id"], _cat, {})
+        ie['action_categories'] = action_categories_dict
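+
+    # NOTE: metadata.json is assumed to follow this shape (category names illustrative):
+    #
+    #     {
+    #         "subject_categories": ["role"],
+    #         "object_categories": ["type"],
+    #         "action_categories": ["access"]
+    #     }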
+
+    def set_scope_values(self, ie, policy_dir):
+
+        scope_path = os.path.join(policy_dir, 'scope.json')
+        with open(scope_path) as f:
+            json_scope = json.load(f)
+
+        ie['subject_category_scope'] = dict()
+        for category, scope in json_perimeter["subject_category_scope"].iteritems():
+            category = self.__get_key_from_value(
+                category,
+                self.driver.get_subject_category_dict(ie["id"])["subject_categories"])
+            _scope_dict = dict()
+            for _scope in scope:
+                _scope_dict[uuid4().hex] = _scope
+            self.driver.set_subject_category_scope_dict(ie["id"], category, _scope_dict)
+            ie['subject_category_scope'][category] = _scope_dict
+
+        ie['object_category_scope'] = dict()
+        for category, scope in json_perimeter["object_category_scope"].iteritems():
+            category = self.__get_key_from_value(
+                category,
+                self.driver.get_object_category_dict(ie["id"])["object_categories"])
+            _scope_dict = dict()
+            for _scope in scope:
+                _scope_dict[uuid4().hex] = _scope
+            self.driver.set_object_category_scope_dict(ie["id"], category, _scope_dict)
+            ie['object_category_scope'][category] = _scope_dict
+
+        ie['action_category_scope'] = dict()
+        for category, scope in json_perimeter["action_category_scope"].iteritems():
+            category = self.__get_key_from_value(
+                category,
+                self.driver.get_action_category_dict(ie["id"])["action_categories"])
+            _scope_dict = dict()
+            for _scope in scope:
+                _scope_dict[uuid4().hex] = _scope
+            self.driver.set_action_category_scope_dict(ie["id"], category, _scope_dict)
+            ie['action_category_scope'][category] = _scope_dict
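+
+    # NOTE: scope.json is assumed to map each category name to its list of scope
+    # values, e.g. (illustrative values):
+    #
+    #     {
+    #         "subject_category_scope": {"role": ["admin", "dev"]},
+    #         "object_category_scope": {"type": ["computing", "storage"]},
+    #         "action_category_scope": {"access": ["read", "write"]}
+    #     }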
+
+    def set_assignments_values(self, ie, policy_dir):
+
+        with open(os.path.join(policy_dir, 'assignment.json')) as f:
+            json_assignments = json.load(f)
+
+        subject_assignments = dict()
+        for category, value in six.iteritems(json_assignments['subject_assignments']):
+            category = self.__get_key_from_value(
+                category,
+                self.driver.get_subject_category_dict(ie["id"])["subject_categories"])
+            for user in value:
+                if user not in subject_assignments:
+                    subject_assignments[user] = dict()
+                if category not in subject_assignments[user]:
+                    subject_assignments[user][category] = \
+                        map(lambda x: self.__get_key_from_value(x, ie['subject_category_scope'][category]), value[user])
+                else:
+                    subject_assignments[user][category].extend(
+                        map(lambda x: self.__get_key_from_value(x, ie['subject_category_scope'][category]), value[user])
+                    )
+        # Note (dthom): subject_category_assignment must be initialized first because,
+        # when the JSON file contains no data, the for loop below is never entered
+        self.driver.set_subject_category_assignment_dict(ie["id"])
+        for subject in subject_assignments:
+            self.driver.set_subject_category_assignment_dict(ie["id"], subject, subject_assignments[subject])
+
+        object_assignments = dict()
+        for category, value in json_assignments["object_assignments"].iteritems():
+            category = self.__get_key_from_value(
+                category,
+                self.driver.get_object_category_dict(ie["id"])["object_categories"])
+            for object_name in value:
+                if object_name not in object_assignments:
+                    object_assignments[object_name] = dict()
+                if category not in object_assignments[object_name]:
+                    object_assignments[object_name][category] = \
+                        map(lambda x: self.__get_key_from_value(x, ie['object_category_scope'][category]),
+                            value[object_name])
+                else:
+                    object_assignments[object_name][category].extend(
+                        map(lambda x: self.__get_key_from_value(x, ie['object_category_scope'][category]),
+                            value[object_name])
+                    )
+        # Note (dthom): object_category_assignment must be initialized first because,
+        # when the JSON file contains no data, the for loop below is never entered
+        self.driver.set_object_category_assignment_dict(ie["id"])
+        for object_name in object_assignments:
+            self.driver.set_object_category_assignment_dict(ie["id"], object_name, object_assignments[object_name])
+
+        action_assignments = dict()
+        for category, value in json_assignments["action_assignments"].iteritems():
+            category = self.__get_key_from_value(
+                category,
+                self.driver.get_action_category_dict(ie["id"])["action_categories"])
+            for action_name in value:
+                if action_name not in action_assignments:
+                    action_assignments[action_name] = dict()
+                if category not in action_assignments[action_name]:
+                    action_assignments[action_name][category] = \
+                        map(lambda x: self.__get_key_from_value(x, ie['action_category_scope'][category]),
+                            value[action_name])
+                else:
+                    action_assignments[action_name][category].extend(
+                        map(lambda x: self.__get_key_from_value(x, ie['action_category_scope'][category]),
+                            value[action_name])
+                    )
+        # Note (dthom): action_category_assignment must be initialized first because,
+        # when the JSON file contains no data, the for loop below is never entered
+        self.driver.set_action_category_assignment_dict(ie["id"])
+        for action in action_assignments:
+            self.driver.set_action_category_assignment_dict(ie["id"], action, action_assignments[action])
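+
+    # NOTE: assignment.json is assumed to map, for each category, a perimeter name to
+    # its scope values, e.g. (illustrative):
+    #
+    #     {
+    #         "subject_assignments": {"role": {"admin": ["admin"]}},
+    #         "object_assignments": {"type": {"servers": ["computing"]}},
+    #         "action_assignments": {"access": {"list": ["read"]}}
+    #     }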
+
+    def set_metarule_values(self, ie, policy_dir):
+
+        metarule_path = os.path.join(policy_dir, 'metarule.json')
+        with open(metarule_path) as f:
+            json_metarule = json.load(f)
+        # ie["meta_rules"] = copy.deepcopy(json_metarule)
+        metarule = dict()
+        categories = {
+            "subject_categories": self.driver.get_subject_category_dict(ie["id"]),
+            "object_categories": self.driver.get_object_category_dict(ie["id"]),
+            "action_categories": self.driver.get_action_category_dict(ie["id"])
+        }
+        # Translate value from JSON file to UUID for Database
+        for relation in json_metarule["sub_meta_rules"]:
+            metarule[relation] = dict()
+            for item in ("subject_categories", "object_categories", "action_categories"):
+                metarule[relation][item] = list()
+                for element in json_metarule["sub_meta_rules"][relation][item]:
+                    metarule[relation][item].append(self.__get_key_from_value(
+                        element,
+                        categories[item][item]
+                    ))
+        submetarules = {
+            "aggregation": json_metarule["aggregation"],
+            "sub_meta_rules": metarule
+        }
+        self.driver.set_meta_rule_dict(ie["id"], submetarules)
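+
+    # NOTE: metarule.json is assumed to declare the aggregation algorithm and the sub
+    # meta rules per relation, e.g. (relation and category names illustrative):
+    #
+    #     {
+    #         "aggregation": "and_true_aggregation",
+    #         "sub_meta_rules": {
+    #             "relation_super": {
+    #                 "subject_categories": ["role"],
+    #                 "action_categories": ["access"],
+    #                 "object_categories": ["type"]
+    #             }
+    #         }
+    #     }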
+
+    def set_subrules_values(self, ie, policy_dir):
+
+        rules_path = os.path.join(policy_dir, 'rules.json')
+        with open(rules_path) as f:
+            json_rules = json.load(f)
+        ie["sub_rules"] = {"rules": copy.deepcopy(json_rules)}
+        # Translate value from JSON file to UUID for Database
+        rules = dict()
+        sub_meta_rules = self.driver.get_meta_rule_dict(ie["id"])
+        for relation in json_rules:
+            if relation not in self.get_sub_meta_rule_relations("admin", ie["id"])["sub_meta_rule_relations"]:
+                raise IntraExtensionError("Bad relation name {} in rules".format(relation))
+            rules[relation] = list()
+            for rule in json_rules[relation]:
+                subrule = list()
+                for cat, cat_func in (
+                    ("subject_categories", self.driver.get_subject_category_scope_dict),
+                    ("action_categories", self.driver.get_action_category_scope_dict),
+                    ("object_categories", self.driver.get_object_category_scope_dict),
+                ):
+                    for cat_value in sub_meta_rules["sub_meta_rules"][relation][cat]:
+                        scope = cat_func(
+                            ie["id"],
+                            cat_value
+                        )[cat_func.__name__.replace("get_", "").replace("_dict", "")]
+
+                        rule_value = rule.pop(0)
+                        a_scope = self.__get_key_from_value(rule_value, scope[cat_value])
+                        subrule.append(a_scope)
+                # if a positive/negative boolean is present, not all items of the rule have been consumed yet
+                if len(rule) >= 1 and type(rule[0]) is bool:
+                    subrule.append(rule[0])
+                else:
+                    # if value doesn't exist add a default value
+                    subrule.append(True)
+                rules[relation].append(subrule)
+        self.driver.set_rules(ie["id"], rules)
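+
+    # NOTE: rules.json is assumed to map each relation to a list of rules, each rule
+    # giving one scope value per category in meta-rule order (subjects, then actions,
+    # then objects), optionally ending with a boolean (True is assumed when absent):
+    #
+    #     {
+    #         "relation_super": [
+    #             ["admin", "read", "computing", true]
+    #         ]
+    #     }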
+
+    def load_intra_extension(self, intra_extension):
+        ie = dict()
+        # TODO: clean some values
+        ie['id'] = uuid4().hex
+        ie["name"] = filter_input(intra_extension["name"])
+        ie["model"] = filter_input(intra_extension["policymodel"])
+        ie["description"] = filter_input(intra_extension["description"])
+        ref = self.driver.set_intra_extension(ie['id'], ie)
+        self.moonlog_api.debug("Creation of IE: {}".format(ref))
+        # read the profile given by "policymodel" and populate default variables
+        policy_dir = os.path.join(CONF.moon.policy_directory, ie["model"])
+        self.set_perimeter_values(ie, policy_dir)
+        self.set_metadata_values(ie, policy_dir)
+        self.set_scope_values(ie, policy_dir)
+        self.set_assignments_values(ie, policy_dir)
+        self.set_metarule_values(ie, policy_dir)
+        self.set_subrules_values(ie, policy_dir)
+        return ref
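+
+    # NOTE: a hedged usage sketch; "policymodel" must name a sub-directory of
+    # CONF.moon.policy_directory holding the six JSON files loaded above:
+    #
+    #     ref = intra_extension_manager.load_intra_extension({
+    #         "name": "tenant1_authz",             # illustrative
+    #         "policymodel": "policy_rbac_authz",  # illustrative directory name
+    #         "description": "RBAC policy for tenant1"
+    #     })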
+
+    def delete_intra_extension(self, intra_extension_id):
+        ref = self.driver.delete_intra_extension(intra_extension_id)
+        return ref
+
+    # Perimeter functions
+
+    @filter_args
+    @enforce("read", "subjects")
+    def get_subject_dict(self, user_name, intra_extension_uuid):
+        return self.driver.get_subject_dict(intra_extension_uuid)
+
+    @filter_args
+    @enforce(("read", "write"), "subjects")
+    def set_subject_dict(self, user_name, intra_extension_uuid, subject_dict):
+        for uuid in subject_dict:
+            # Next line will raise an error if user is not present in Keystone database
+            self.identity_api.get_user(uuid)
+        return self.driver.set_subject_dict(intra_extension_uuid, subject_dict)
+
+    @filter_args
+    @enforce(("read", "write"), "subjects")
+    def add_subject_dict(self, user_name, intra_extension_uuid, subject_uuid):
+        # Next line will raise an error if user is not present in Keystone database
+        user = self.identity_api.get_user(subject_uuid)
+        return self.driver.add_subject(intra_extension_uuid, subject_uuid, user["name"])
+
+    @filter_args
+    @enforce("write", "subjects")
+    def del_subject(self, user_name, intra_extension_uuid, subject_uuid):
+        self.driver.remove_subject(intra_extension_uuid, subject_uuid)
+
+    @filter_args
+    @enforce("read", "objects")
+    def get_object_dict(self, user_name, intra_extension_uuid):
+        return self.driver.get_object_dict(intra_extension_uuid)
+
+    @filter_args
+    @enforce(("read", "write"), "objects")
+    def set_object_dict(self, user_name, intra_extension_uuid, object_dict):
+        return self.driver.set_object_dict(intra_extension_uuid, object_dict)
+
+    @filter_args
+    @enforce(("read", "write"), "objects")
+    def add_object_dict(self, user_name, intra_extension_uuid, object_name):
+        object_uuid = uuid4().hex
+        return self.driver.add_object(intra_extension_uuid, object_uuid, object_name)
+
+    @filter_args
+    @enforce("write", "objects")
+    def del_object(self, user_name, intra_extension_uuid, object_uuid):
+        self.driver.remove_object(intra_extension_uuid, object_uuid)
+
+    @filter_args
+    @enforce("read", "actions")
+    def get_action_dict(self, user_name, intra_extension_uuid):
+        return self.driver.get_action_dict(intra_extension_uuid)
+
+    @filter_args
+    @enforce(("read", "write"), "actions")
+    def set_action_dict(self, user_name, intra_extension_uuid, action_dict):
+        return self.driver.set_action_dict(intra_extension_uuid, action_dict)
+
+    @filter_args
+    @enforce(("read", "write"), "actions")
+    def add_action_dict(self, user_name, intra_extension_uuid, action_name):
+        action_uuid = uuid4().hex
+        return self.driver.add_action(intra_extension_uuid, action_uuid, action_name)
+
+    @filter_args
+    @enforce("write", "actions")
+    def del_action(self, user_name, intra_extension_uuid, action_uuid):
+        self.driver.remove_action(intra_extension_uuid, action_uuid)
+
+    # Metadata functions
+
+    @filter_args
+    @enforce("read", "subject_categories")
+    def get_subject_category_dict(self, user_name, intra_extension_uuid):
+        return self.driver.get_subject_category_dict(intra_extension_uuid)
+
+    @filter_args
+    @enforce("read", "subject_categories")
+    @enforce("read", "subject_category_scope")
+    @enforce("write", "subject_category_scope")
+    def set_subject_category_dict(self, user_name, intra_extension_uuid, subject_category):
+        subject_category_dict = self.driver.set_subject_category_dict(intra_extension_uuid, subject_category)
+        # if we add a new category, we must add it to the subject_category_scope
+        for _cat in subject_category.keys():
+            try:
+                self.driver.get_subject_category_scope_dict(intra_extension_uuid, _cat)
+            except CategoryNotFound:
+                self.driver.set_subject_category_scope_dict(intra_extension_uuid, _cat, {})
+        return subject_category_dict
+
+    @filter_args
+    @enforce("read", "subject_categories")
+    @enforce("write", "subject_categories")
+    def add_subject_category_dict(self, user_name, intra_extension_uuid, subject_category_name):
+        subject_category_uuid = uuid4().hex
+        return self.driver.add_subject_category_dict(intra_extension_uuid, subject_category_uuid, subject_category_name)
+
+    @filter_args
+    @enforce("write", "subject_categories")
+    def del_subject_category(self, user_name, intra_extension_uuid, subject_uuid):
+        return self.driver.remove_subject_category(intra_extension_uuid, subject_uuid)
+
+    @filter_args
+    @enforce("read", "object_categories")
+    def get_object_category_dict(self, user_name, intra_extension_uuid):
+        return self.driver.get_object_category_dict(intra_extension_uuid)
+
+    @filter_args
+    @enforce("read", "object_categories")
+    @enforce("read", "object_category_scope")
+    @enforce("write", "object_category_scope")
+    def set_object_category_dict(self, user_name, intra_extension_uuid, object_category):
+        object_category_dict = self.driver.set_object_category_dict(intra_extension_uuid, object_category)
+        # if we add a new category, we must add it to the object_category_scope
+        for _cat in object_category.keys():
+            try:
+                self.driver.get_object_category_scope_dict(intra_extension_uuid, _cat)
+            except CategoryNotFound:
+                self.driver.set_object_category_scope_dict(intra_extension_uuid, _cat, {})
+        return object_category_dict
+
+    @filter_args
+    @enforce("read", "object_categories")
+    @enforce("write", "object_categories")
+    def add_object_category_dict(self, user_name, intra_extension_uuid, object_category_name):
+        object_category_uuid = uuid4().hex
+        return self.driver.add_object_category_dict(intra_extension_uuid, object_category_uuid, object_category_name)
+
+    @filter_args
+    @enforce("write", "object_categories")
+    def del_object_category(self, user_name, intra_extension_uuid, object_uuid):
+        return self.driver.remove_object_category(intra_extension_uuid, object_uuid)
+
+    @filter_args
+    @enforce("read", "action_categories")
+    def get_action_category_dict(self, user_name, intra_extension_uuid):
+        return self.driver.get_action_category_dict(intra_extension_uuid)
+
+    @filter_args
+    @enforce("read", "action_categories")
+    @enforce("read", "action_category_scope")
+    @enforce("write", "action_category_scope")
+    def set_action_category_dict(self, user_name, intra_extension_uuid, action_category):
+        action_category_dict = self.driver.set_action_category_dict(intra_extension_uuid, action_category)
+        # if we add a new category, we must add it to the action_category_scope
+        for _cat in action_category.keys():
+            try:
+                self.driver.get_action_category_scope_dict(intra_extension_uuid, _cat)
+            except CategoryNotFound:
+                self.driver.set_action_category_scope_dict(intra_extension_uuid, _cat, {})
+        return action_category_dict
+
+    @filter_args
+    @enforce("read", "action_categories")
+    @enforce("write", "action_categories")
+    def add_action_category_dict(self, user_name, intra_extension_uuid, action_category_name):
+        action_category_uuid = uuid4().hex
+        return self.driver.add_action_category_dict(intra_extension_uuid, action_category_uuid, action_category_name)
+
+    @filter_args
+    @enforce("write", "action_categories")
+    def del_action_category(self, user_name, intra_extension_uuid, action_uuid):
+        return self.driver.remove_action_category(intra_extension_uuid, action_uuid)
+
+    # Scope functions
+    @filter_args
+    @enforce("read", "subject_category_scope")
+    @enforce("read", "subject_category")
+    def get_subject_category_scope_dict(self, user_name, intra_extension_uuid, category):
+        if category not in self.get_subject_category_dict(user_name, intra_extension_uuid)["subject_categories"]:
+            raise IntraExtensionError("Subject category {} is unknown.".format(category))
+        return self.driver.get_subject_category_scope_dict(intra_extension_uuid, category)
+
+    @filter_args
+    @enforce("read", "subject_category_scope")
+    @enforce("read", "subject_category")
+    def set_subject_category_scope_dict(self, user_name, intra_extension_uuid, category, scope):
+        if category not in self.get_subject_category_dict(user_name, intra_extension_uuid)["subject_categories"]:
+            raise IntraExtensionError("Subject category {} is unknown.".format(category))
+        return self.driver.set_subject_category_scope_dict(intra_extension_uuid, category, scope)
+
+    @filter_args
+    @enforce(("read", "write"), "subject_category_scope")
+    @enforce("read", "subject_category")
+    def add_subject_category_scope_dict(self, user_name, intra_extension_uuid, subject_category, scope_name):
+        subject_categories = self.get_subject_category_dict(user_name, intra_extension_uuid)
+        # check if subject_category exists in database
+        if subject_category not in subject_categories["subject_categories"]:
+            raise IntraExtensionError("Subject category {} is unknown.".format(subject_category))
+        scope_uuid = uuid4().hex
+        return self.driver.add_subject_category_scope_dict(
+            intra_extension_uuid,
+            subject_category,
+            scope_uuid,
+            scope_name)
+
+    @filter_args
+    @enforce("write", "subject_category_scope")
+    @enforce("read", "subject_category")
+    def del_subject_category_scope(self, user_name, intra_extension_uuid, subject_category, subject_category_scope):
+        subject_categories = self.get_subject_category_dict(user_name, intra_extension_uuid)
+        # check if subject_category exists in database
+        if subject_category not in subject_categories["subject_categories"]:
+            raise IntraExtensionError("Subject category {} is unknown.".format(subject_category))
+        return self.driver.remove_subject_category_scope_dict(
+            intra_extension_uuid,
+            subject_category,
+            subject_category_scope)
+
+    @filter_args
+    @enforce("read", "object_category_scope")
+    @enforce("read", "object_category")
+    def get_object_category_scope_dict(self, user_name, intra_extension_uuid, category):
+        if category not in self.get_object_category_dict(user_name, intra_extension_uuid)["object_categories"]:
+            raise IntraExtensionError("Object category {} is unknown.".format(category))
+        return self.driver.get_object_category_scope_dict(intra_extension_uuid, category)
+
+    @filter_args
+    @enforce("read", "object_category_scope")
+    @enforce("read", "object_category")
+    def set_object_category_scope_dict(self, user_name, intra_extension_uuid, category, scope):
+        if category not in self.get_object_category_dict(user_name, intra_extension_uuid)["object_categories"]:
+            raise IntraExtensionError("Object category {} is unknown.".format(category))
+        return self.driver.set_object_category_scope_dict(intra_extension_uuid, category, scope)
+
+    @filter_args
+    @enforce(("read", "write"), "object_category_scope")
+    @enforce("read", "object_category")
+    def add_object_category_scope_dict(self, user_name, intra_extension_uuid, object_category, scope_name):
+        object_categories = self.get_object_category_dict(user_name, intra_extension_uuid)
+        # check if object_category exists in database
+        if object_category not in object_categories["object_categories"]:
+            raise IntraExtensionError("Object category {} is unknown.".format(object_category))
+        scope_uuid = uuid4().hex
+        return self.driver.add_object_category_scope_dict(
+            intra_extension_uuid,
+            object_category,
+            scope_uuid,
+            scope_name)
+
+    @filter_args
+    @enforce("write", "object_category_scope")
+    @enforce("read", "object_category")
+    def del_object_category_scope(self, user_name, intra_extension_uuid, object_category, object_category_scope):
+        object_categories = self.get_object_category_dict(user_name, intra_extension_uuid)
+        # check if object_category exists in database
+        if object_category not in object_categories["object_categories"]:
+            raise IntraExtensionError("Object category {} is unknown.".format(object_category))
+        return self.driver.remove_object_category_scope_dict(
+            intra_extension_uuid,
+            object_category,
+            object_category_scope)
+
+    @filter_args
+    @enforce("read", "action_category_scope")
+    @enforce("read", "action_category")
+    def get_action_category_scope_dict(self, user_name, intra_extension_uuid, category):
+        if category not in self.get_action_category_dict(user_name, intra_extension_uuid)["action_categories"]:
+            raise IntraExtensionError("Action category {} is unknown.".format(category))
+        return self.driver.get_action_category_scope_dict(intra_extension_uuid, category)
+
+    @filter_args
+    @enforce(("read", "write"), "action_category_scope")
+    @enforce("read", "action_category")
+    def set_action_category_scope_dict(self, user_name, intra_extension_uuid, category, scope):
+        if category not in self.get_action_category_dict(user_name, intra_extension_uuid)["action_categories"]:
+            raise IntraExtensionError("Action category {} is unknown.".format(category))
+        return self.driver.set_action_category_scope_dict(intra_extension_uuid, category, scope)
+
+    @filter_args
+    @enforce(("read", "write"), "action_category_scope")
+    @enforce("read", "action_category")
+    def add_action_category_scope_dict(self, user_name, intra_extension_uuid, action_category, scope_name):
+        action_categories = self.get_action_category_dict(user_name, intra_extension_uuid)
+        # check if action_category exists in database
+        if action_category not in action_categories["action_categories"]:
+            raise IntraExtensionError("Action category {} is unknown.".format(action_category))
+        scope_uuid = uuid4().hex
+        return self.driver.add_action_category_scope_dict(
+            intra_extension_uuid,
+            action_category,
+            scope_uuid,
+            scope_name)
+
+    @filter_args
+    @enforce("write", "action_category_scope")
+    @enforce("read", "action_category")
+    def del_action_category_scope(self, user_name, intra_extension_uuid, action_category, action_category_scope):
+        action_categories = self.get_action_category_dict(user_name, intra_extension_uuid)
+        # check if action_category exists in database
+        if action_category not in action_categories["action_categories"]:
+            raise IntraExtensionError("Action category {} is unknown.".format(action_category))
+        return self.driver.remove_action_category_scope_dict(
+            intra_extension_uuid,
+            action_category,
+            action_category_scope)
+
+    # Assignment functions
+
+    @filter_args
+    @enforce("read", "subject_category_assignment")
+    @enforce("read", "subjects")
+    def get_subject_category_assignment_dict(self, user_name, intra_extension_uuid, subject_uuid):
+        # check if subject exists in database
+        if subject_uuid not in self.get_subject_dict(user_name, intra_extension_uuid)["subjects"]:
+            LOG.error("add_subject_assignment: unknown subject_id {}".format(subject_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.get_subject_category_assignment_dict(intra_extension_uuid, subject_uuid)
+
+    @filter_args
+    @enforce("read", "subject_category_assignment")
+    @enforce("write", "subject_category_assignment")
+    @enforce("read", "subjects")
+    def set_subject_category_assignment_dict(self, user_name, intra_extension_uuid, subject_uuid, assignment_dict):
+        # check if subject exists in database
+        if subject_uuid not in self.get_subject_dict(user_name, intra_extension_uuid)["subjects"]:
+            LOG.error("add_subject_assignment: unknown subject_id {}".format(subject_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.set_subject_category_assignment_dict(intra_extension_uuid, subject_uuid, assignment_dict)
+
+    @filter_args
+    @enforce("read", "subject_category_assignment")
+    @enforce("write", "subject_category_assignment")
+    @enforce("read", "subjects")
+    @enforce("read", "subject_category")
+    def del_subject_category_assignment(self, user_name, intra_extension_uuid, subject_uuid, category_uuid, scope_uuid):
+        # check if category exists in database
+        if category_uuid not in self.get_subject_category_dict(user_name, intra_extension_uuid)["subject_categories"]:
+            LOG.error("add_subject_category_scope: unknown subject_category {}".format(category_uuid))
+            raise IntraExtensionError("Bad input data")
+        # check if subject exists in database
+        if subject_uuid not in self.get_subject_dict(user_name, intra_extension_uuid)["subjects"]:
+            LOG.error("add_subject_assignment: unknown subject_id {}".format(subject_uuid))
+            raise IntraExtensionError("Bad input data")
+        self.driver.remove_subject_category_assignment(intra_extension_uuid, subject_uuid, category_uuid, scope_uuid)
+
+    @filter_args
+    @enforce("write", "subject_category_assignment")
+    @enforce("read", "subjects")
+    @enforce("read", "subject_category")
+    def add_subject_category_assignment_dict(self, user_name, intra_extension_uuid, subject_uuid, category_uuid, scope_uuid):
+        # check if category exists in database
+        if category_uuid not in self.get_subject_category_dict(user_name, intra_extension_uuid)["subject_categories"]:
+            LOG.error("add_subject_category_scope: unknown subject_category {}".format(category_uuid))
+            raise IntraExtensionError("Bad input data")
+        # check if subject exists in database
+        if subject_uuid not in self.get_subject_dict(user_name, intra_extension_uuid)["subjects"]:
+            LOG.error("add_subject_assignment: unknown subject_id {}".format(subject_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.add_subject_category_assignment_dict(intra_extension_uuid, subject_uuid, category_uuid, scope_uuid)
+
+    @filter_args
+    @enforce("read", "object_category_assignment")
+    @enforce("read", "objects")
+    def get_object_category_assignment_dict(self, user_name, intra_extension_uuid, object_uuid):
+        # check if object exists in database
+        if object_uuid not in self.get_object_dict(user_name, intra_extension_uuid)["objects"]:
+            LOG.error("add_object_assignment: unknown object_id {}".format(object_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.get_object_category_assignment_dict(intra_extension_uuid, object_uuid)
+
+    @filter_args
+    @enforce("read", "object_category_assignment")
+    @enforce("write", "object_category_assignment")
+    @enforce("read", "objects")
+    def set_object_category_assignment_dict(self, user_name, intra_extension_uuid, object_uuid, assignment_dict):
+        # check if object exists in database
+        if object_uuid not in self.get_object_dict(user_name, intra_extension_uuid)["objects"]:
+            LOG.error("add_object_assignment: unknown object_id {}".format(object_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.set_object_category_assignment_dict(intra_extension_uuid, object_uuid, assignment_dict)
+
+    @filter_args
+    @enforce("read", "object_category_assignment")
+    @enforce("write", "object_category_assignment")
+    @enforce("read", "objects")
+    @enforce("read", "object_category")
+    def del_object_category_assignment(self, user_name, intra_extension_uuid, object_uuid, category_uuid, scope_uuid):
+        # check if category exists in database
+        if category_uuid not in self.get_object_category_dict(user_name, intra_extension_uuid)["object_categories"]:
+            LOG.error("add_object_category_scope: unknown object_category {}".format(category_uuid))
+            raise IntraExtensionError("Bad input data")
+        # check if object exists in database
+        if object_uuid not in self.get_object_dict(user_name, intra_extension_uuid)["objects"]:
+            LOG.error("add_object_assignment: unknown object_id {}".format(object_uuid))
+            raise IntraExtensionError("Bad input data")
+        self.driver.remove_object_category_assignment(intra_extension_uuid, object_uuid, category_uuid, scope_uuid)
+
+    @filter_args
+    @enforce("write", "object_category_assignment")
+    @enforce("read", "objects")
+    @enforce("read", "object_category")
+    def add_object_category_assignment_dict(self, user_name, intra_extension_uuid, object_uuid, category_uuid, scope_uuid):
+        # check if category exists in database
+        if category_uuid not in self.get_object_category_dict(user_name, intra_extension_uuid)["object_categories"]:
+            LOG.error("add_object_category_scope: unknown object_category {}".format(category_uuid))
+            raise IntraExtensionError("Bad input data")
+        # check if object exists in database
+        if object_uuid not in self.get_object_dict(user_name, intra_extension_uuid)["objects"]:
+            LOG.error("add_object_assignment: unknown object_id {}".format(object_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.add_object_category_assignment_dict(intra_extension_uuid, object_uuid, category_uuid, scope_uuid)
+
+    @filter_args
+    @enforce("read", "action_category_assignment")
+    @enforce("read", "actions")
+    def get_action_category_assignment_dict(self, user_name, intra_extension_uuid, action_uuid):
+        # check if action exists in database
+        if action_uuid not in self.get_action_dict(user_name, intra_extension_uuid)["actions"]:
+            LOG.error("add_action_assignment: unknown action_id {}".format(action_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.get_action_category_assignment_dict(intra_extension_uuid, action_uuid)
+
+    @filter_args
+    @enforce("read", "action_category_assignment")
+    @enforce("write", "action_category_assignment")
+    @enforce("read", "actions")
+    def set_action_category_assignment_dict(self, user_name, intra_extension_uuid, action_uuid, assignment_dict):
+        # check if action exists in database
+        if action_uuid not in self.get_action_dict(user_name, intra_extension_uuid)["actions"]:
+            LOG.error("add_action_assignment: unknown action_id {}".format(action_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.set_action_category_assignment_dict(intra_extension_uuid, action_uuid, assignment_dict)
+
+    @filter_args
+    @enforce("read", "action_category_assignment")
+    @enforce("write", "action_category_assignment")
+    @enforce("read", "actions")
+    @enforce("read", "action_category")
+    def del_action_category_assignment(self, user_name, intra_extension_uuid, action_uuid, category_uuid, scope_uuid):
+        # check if category exists in database
+        if category_uuid not in self.get_action_category_dict(user_name, intra_extension_uuid)["action_categories"]:
+            LOG.error("add_action_category_scope: unknown action_category {}".format(category_uuid))
+            raise IntraExtensionError("Bad input data")
+        # check if action exists in database
+        if action_uuid not in self.get_action_dict(user_name, intra_extension_uuid)["actions"]:
+            LOG.error("add_action_assignment: unknown action_id {}".format(action_uuid))
+            raise IntraExtensionError("Bad input data")
+        self.driver.remove_action_category_assignment(intra_extension_uuid, action_uuid, category_uuid, scope_uuid)
+
+    @filter_args
+    @enforce("write", "action_category_assignment")
+    @enforce("read", "actions")
+    @enforce("read", "action_category")
+    def add_action_category_assignment_dict(self, user_name, intra_extension_uuid, action_uuid, category_uuid, scope_uuid):
+        # check if category exists in database
+        if category_uuid not in self.get_action_category_dict(user_name, intra_extension_uuid)["action_categories"]:
+            LOG.error("add_action_category_scope: unknown action_category {}".format(category_uuid))
+            raise IntraExtensionError("Bad input data")
+        # check if action exists in database
+        if action_uuid not in self.get_action_dict(user_name, intra_extension_uuid)["actions"]:
+            LOG.error("add_action_assignment: unknown action_id {}".format(action_uuid))
+            raise IntraExtensionError("Bad input data")
+        return self.driver.add_action_category_assignment_dict(
+            intra_extension_uuid,
+            action_uuid,
+            category_uuid,
+            scope_uuid
+        )
+
+    # Metarule functions
+    @filter_args
+    def get_aggregation_algorithms(self, user_name, intra_extension_uuid):
+        # TODO: check which algorithms are really usable
+        return {"aggregation_algorithms": ["and_true_aggregation", "test_aggregation"]}
+
+    @filter_args
+    @enforce("read", "aggregation_algorithms")
+    def get_aggregation_algorithm(self, user_name, intra_extension_uuid):
+        return self.driver.get_meta_rule_dict(intra_extension_uuid)
+
+    @filter_args
+    @enforce("read", "aggregation_algorithms")
+    @enforce("write", "aggregation_algorithms")
+    def set_aggregation_algorithm(self, user_name, intra_extension_uuid, aggregation_algorithm):
+        if aggregation_algorithm not in self.get_aggregation_algorithms(
+                user_name, intra_extension_uuid)["aggregation_algorithms"]:
+            raise IntraExtensionError("Unknown aggregation_algorithm: {}".format(aggregation_algorithm))
+        meta_rule = self.driver.get_meta_rule_dict(intra_extension_uuid)
+        meta_rule["aggregation"] = aggregation_algorithm
+        return self.driver.set_meta_rule_dict(intra_extension_uuid, meta_rule)
+
+    @filter_args
+    @enforce("read", "sub_meta_rule")
+    def get_sub_meta_rule(self, user_name, intra_extension_uuid):
+        return self.driver.get_meta_rule_dict(intra_extension_uuid)
+
+    @filter_args
+    @enforce("read", "sub_meta_rule")
+    @enforce("write", "sub_meta_rule")
+    def set_sub_meta_rule(self, user_name, intra_extension_uuid, sub_meta_rules):
+        # TODO (dthom): When sub_meta_rule is set, all rules must be dropped
+        # because the previous rules cannot be mapped to the new sub_meta_rule.
+        for relation in sub_meta_rules.keys():
+            if relation not in self.get_sub_meta_rule_relations(user_name, intra_extension_uuid)["sub_meta_rule_relations"]:
+                LOG.error("set_sub_meta_rule unknown MetaRule relation {}".format(relation))
+                raise IntraExtensionError("Bad input data.")
+            for cat in ("subject_categories", "object_categories", "action_categories"):
+                if cat not in sub_meta_rules[relation]:
+                    LOG.error("set_sub_meta_rule category {} missed".format(cat))
+                    raise IntraExtensionError("Bad input data.")
+                if type(sub_meta_rules[relation][cat]) is not list:
+                    LOG.error("set_sub_meta_rule category {} is not a list".format(cat))
+                    raise IntraExtensionError("Bad input data.")
+            subject_categories = self.get_subject_category_dict(user_name, intra_extension_uuid)
+            for data in sub_meta_rules[relation]["subject_categories"]:
+                    if data not in subject_categories["subject_categories"]:
+                        LOG.error("set_sub_meta_rule category {} is not part of subject_categories {}".format(
+                            data, subject_categories))
+                        raise IntraExtensionError("Bad input data.")
+            object_categories = self.get_object_category_dict(user_name, intra_extension_uuid)
+            for data in sub_meta_rules[relation]["object_categories"]:
+                    if data not in object_categories["object_categories"]:
+                        LOG.error("set_sub_meta_rule category {} is not part of object_categories {}".format(
+                            data, object_categories))
+                        raise IntraExtensionError("Bad input data.")
+            action_categories = self.get_action_category_dict(user_name, intra_extension_uuid)
+            for data in sub_meta_rules[relation]["action_categories"]:
+                    if data not in action_categories["action_categories"]:
+                        LOG.error("set_sub_meta_rule category {} is not part of action_categories {}".format(
+                            data, action_categories))
+                        raise IntraExtensionError("Bad input data.")
+        aggregation = self.driver.get_meta_rule_dict(intra_extension_uuid)["aggregation"]
+        return self.driver.set_meta_rule_dict(
+            intra_extension_uuid,
+            {
+                "aggregation": aggregation,
+                "sub_meta_rules": sub_meta_rules
+            })
+
+    # Sub-rules functions
+    @filter_args
+    @enforce("read", "sub_rules")
+    def get_sub_rules(self, user_name, intra_extension_uuid):
+        return self.driver.get_rules(intra_extension_uuid)
+
+    @filter_args
+    @enforce("read", "sub_rules")
+    @enforce("write", "sub_rules")
+    def set_sub_rule(self, user_name, intra_extension_uuid, relation, sub_rule):
+        for item in sub_rule:
+            if type(item) not in (str, unicode, bool):
+                raise IntraExtensionError("Bad input data (sub_rule).")
+        ref_rules = self.driver.get_rules(intra_extension_uuid)
+        _sub_rule = list(sub_rule)
+        if relation not in self.get_sub_meta_rule_relations(user_name, intra_extension_uuid)["sub_meta_rule_relations"]:
+            raise IntraExtensionError("Bad input data (rules).")
+        # filter strings in sub_rule
+        sub_rule = [filter_input(x) for x in sub_rule]
+        # check if length of sub_rule is correct from metadata_sub_rule
+        metadata_sub_rule = self.get_sub_meta_rule(user_name, intra_extension_uuid)
+        metadata_sub_rule_length = len(metadata_sub_rule['sub_meta_rules'][relation]["subject_categories"]) + \
+                                   len(metadata_sub_rule['sub_meta_rules'][relation]["action_categories"]) + \
+                                   len(metadata_sub_rule['sub_meta_rules'][relation]["object_categories"]) + 1
+        if metadata_sub_rule_length != len(sub_rule):
+            raise IntraExtensionError("Bad number of argument in sub_rule {}/{}".format(sub_rule,
+                                                                                        metadata_sub_rule_length))
+        # check if each item in sub_rule match a corresponding scope value
+        for category in metadata_sub_rule['sub_meta_rules'][relation]["subject_categories"]:
+            item = _sub_rule.pop(0)
+            if item not in self.get_subject_category_scope_dict(
+                    user_name,
+                    intra_extension_uuid, category)["subject_category_scope"][category].keys():
+                raise IntraExtensionError("Bad subject value in sub_rule {}/{}".format(category, item))
+        for category in metadata_sub_rule['sub_meta_rules'][relation]["action_categories"]:
+            action_categories = self.get_action_category_scope_dict(
+                        user_name,
+                        intra_extension_uuid, category)["action_category_scope"][category]
+            item = _sub_rule.pop(0)
+            if item not in action_categories.keys():
+                self.moonlog_api.warning("set_sub_rule bad action value in sub_rule {}/{}".format(category, item))
+                raise IntraExtensionError("Bad input data.")
+        for category in metadata_sub_rule['sub_meta_rules'][relation]["object_categories"]:
+            item = _sub_rule.pop(0)
+            if item not in self.get_object_category_scope_dict(
+                    user_name,
+                    intra_extension_uuid, category)["object_category_scope"][category].keys():
+                raise IntraExtensionError("Bad object value in sub_rule {}/{}".format(category, item))
+        # check if relation is already there
+        if relation not in ref_rules["rules"]:
+            ref_rules["rules"][relation] = list()
+        # add sub_rule
+        ref_rules["rules"][relation].append(sub_rule)
+        return self.driver.set_rules(intra_extension_uuid, ref_rules["rules"])
+
+    @filter_args
+    @enforce("read", "sub_rules")
+    @enforce("write", "sub_rules")
+    def del_sub_rule(self, user_name, intra_extension_uuid, relation_name, rule):
+        ref_rules = self.driver.get_rules(intra_extension_uuid)
+        rule = rule.split("+")
+        for index, _item in enumerate(rule):
+            if "True" in _item:
+                rule[index] = True
+            if "False" in _item:
+                rule[index] = False
+        if relation_name in ref_rules["rules"]:
+            if rule in ref_rules["rules"][relation_name]:
+                ref_rules["rules"][relation_name].remove(rule)
+            else:
+                self.moonlog_api.error("Unknown rule: {}".format(rule))
+        else:
+            self.moonlog_api.error("Unknown relation name for rules: {}".format(relation_name))
+        return self.driver.set_rules(intra_extension_uuid, ref_rules["rules"])
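+
+    # NOTE: `rule` arrives as a "+"-separated string of scope UUIDs with an optional
+    # trailing boolean, e.g. (hypothetical UUIDs):
+    #
+    #     del_sub_rule(user_name, ie_uuid, "relation_super", "uuid1+uuid2+uuid3+True")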
+
+
+@dependency.provider('authz_api')
+@dependency.requires('identity_api', 'moonlog_api', 'tenant_api')
+class IntraExtensionAuthzManager(IntraExtensionManager):
+
+    __genre__ = "authz"
+
+    def authz(self, uuid, sub, obj, act):
+        """Check authorization for a particular action.
+
+        :param uuid: UUID of a tenant
+        :param sub: subject of the request
+        :param obj: object of the request
+        :param act: action of the request
+        :return: True if the request is authorized, False otherwise (may raise an exception)
+        """
+        _uuid = self.tenant_api.get_extension_uuid(uuid, "authz")
+        return super(IntraExtensionAuthzManager, self).authz(_uuid, sub, obj, act)
+
+    def delete_intra_extension(self, intra_extension_id):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_subject_dict(self, user_name, intra_extension_uuid, subject_dict):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_subject_dict(self, user_name, intra_extension_uuid, subject_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_subject(self, user_name, intra_extension_uuid, subject_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_object_dict(self, user_name, intra_extension_uuid, object_dict):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_object_dict(self, user_name, intra_extension_uuid, object_name):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_object(self, user_name, intra_extension_uuid, object_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_action_dict(self, user_name, intra_extension_uuid, action_dict):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_action_dict(self, user_name, intra_extension_uuid, action_name):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_action(self, user_name, intra_extension_uuid, action_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_subject_category_dict(self, user_name, intra_extension_uuid, subject_category):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_subject_category_dict(self, user_name, intra_extension_uuid, subject_category_name):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_subject_category(self, user_name, intra_extension_uuid, subject_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_object_category_dict(self, user_name, intra_extension_uuid, object_category):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_object_category_dict(self, user_name, intra_extension_uuid, object_category_name):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_object_category(self, user_name, intra_extension_uuid, object_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_action_category_dict(self, user_name, intra_extension_uuid, action_category):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_action_category_dict(self, user_name, intra_extension_uuid, action_category_name):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_action_category(self, user_name, intra_extension_uuid, action_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_subject_category_scope_dict(self, user_name, intra_extension_uuid, category, scope):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_subject_category_scope_dict(self, user_name, intra_extension_uuid, subject_category, scope_name):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_subject_category_scope(self, user_name, intra_extension_uuid, subject_category, subject_category_scope):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_object_category_scope_dict(self, user_name, intra_extension_uuid, category, scope):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_object_category_scope_dict(self, user_name, intra_extension_uuid, object_category, scope_name):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_object_category_scope(self, user_name, intra_extension_uuid, object_category, object_category_scope):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_action_category_scope_dict(self, user_name, intra_extension_uuid, category, scope):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_action_category_scope_dict(self, user_name, intra_extension_uuid, action_category, scope_name):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_action_category_scope(self, user_name, intra_extension_uuid, action_category, action_category_scope):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_subject_category_assignment_dict(self, user_name, intra_extension_uuid, subject_uuid, assignment_dict):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_subject_category_assignment(self, user_name, intra_extension_uuid, subject_uuid, category_uuid, scope_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_subject_category_assignment_dict(self, user_name, intra_extension_uuid, subject_uuid, category_uuid, scope_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_object_category_assignment_dict(self, user_name, intra_extension_uuid, object_uuid, assignment_dict):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_object_category_assignment(self, user_name, intra_extension_uuid, object_uuid, category_uuid, scope_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_object_category_assignment_dict(self, user_name, intra_extension_uuid, object_uuid, category_uuid, scope_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_action_category_assignment_dict(self, user_name, intra_extension_uuid, action_uuid, assignment_dict):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_action_category_assignment(self, user_name, intra_extension_uuid, action_uuid, category_uuid, scope_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def add_action_category_assignment_dict(self, user_name, intra_extension_uuid, action_uuid, category_uuid, scope_uuid):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_aggregation_algorithm(self, user_name, intra_extension_uuid, aggregation_algorithm):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_sub_meta_rule(self, user_name, intra_extension_uuid, sub_meta_rules):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def set_sub_rule(self, user_name, intra_extension_uuid, relation, sub_rule):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+    def del_sub_rule(self, user_name, intra_extension_uuid, relation_name, rule):
+        raise AuthIntraExtensionModificationNotAuthorized()
+
+
+@dependency.provider('admin_api')
+@dependency.requires('identity_api', 'moonlog_api', 'tenant_api')
+class IntraExtensionAdminManager(IntraExtensionManager):
+
+    __genre__ = "admin"
+
+    # def set_perimeter_values(self, ie, policy_dir):
+    #
+    #     # Check if object like "subjects", "objects", "actions" exist...
+    #     perimeter_path = os.path.join(policy_dir, 'perimeter.json')
+    #     f = open(perimeter_path)
+    #     json_perimeter = json.load(f)
+    #     for item in ("subjects", "objects", "actions"):
+    #         if item not in json_perimeter["objects"]:
+    #             raise AdminIntraExtensionCreationError()
+    #
+    #     super(IntraExtensionAdminManager, self).set_perimeter_values(ie, policy_dir)
+    #
+    # @filter_args
+    # def add_subject_dict(self, user_name, uuid, subject_uuid):
+    #     raise AdminIntraExtensionModificationNotAuthorized()
+    #
+    # @filter_args
+    # def del_subject(self, user_name, uuid, subject_uuid):
+    #     raise AdminIntraExtensionModificationNotAuthorized()
+
+
+class AuthzDriver(object):
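+    """Abstract read-only driver interface for authorization data
+    (categories, category values, meta-rule and rules); every method
+    raises NotImplemented until a backend implements it."""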
+
+    def get_subject_category_list(self, extension_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_object_category_list(self, extension_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_action_category_list(self, extension_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_subject_category_value_dict(self, extension_uuid, subject_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_object_category_value_dict(self, extension_uuid, object_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_action_category_value_dict(self, extension_uuid, action_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_meta_rule(self, extension_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_rules(self, extension_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+
+class UpdateDriver(object):
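+    """Abstract driver exposing getters and setters for IntraExtension
+    attributes (tenant, name, model, genre, description); every method
+    raises NotImplemented until a backend implements it."""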
+
+    def get_intra_extensions(self):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_intra_extension(self, extension_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def create_intra_extensions(self, extension_uuid, intra_extension):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def delete_intra_extensions(self, extension_uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and setter for tenant
+
+    def get_tenant(self, uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_tenant(self, uuid, tenant_id):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and setter for name
+
+    def get_name(self, uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_name(self, uuid, name):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and setter for model
+
+    def get_model(self, uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_model(self, uuid, model):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and setter for genre
+
+    def get_genre(self, uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_genre(self, uuid, genre):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and setter for description
+
+    def get_description(self, uuid):
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_description(self, uuid, args):
+        raise exception.NotImplemented()  # pragma: no cover
+
+
+class IntraExtensionDriver(object):
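+    """Abstract storage driver for IntraExtensions.
+
+    For each policy element (subjects, objects, actions, their categories,
+    category scopes and assignments), plus the meta-rule and rules, the
+    interface exposes get/set/add/remove methods; every method raises
+    NotImplemented until a backend implements it.
+    """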
+
+    # Getter and Setter for subjects
+
+    def get_subject_dict(self, extension_uuid):
+        """Get the list of subject for that IntraExtension
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: a dictionary containing all subjects for that IntraExtension, eg. {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_subject_dict(self, extension_uuid, subject_dict):
+        """Set the list of subject for that IntraExtension
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_dict: dict of subject: {"uuid1": "name1", "uuid2": "name2"}
+        :type subject_dict: dict
+        :return: a dictionary containing all subjects for that IntraExtension, eg. {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_subject(self, extension_uuid, subject_uuid, subject_name):
+        """Add a subject
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_uuid: Subject UUID
+        :type subject_uuid: string
+        :param subject_name: Subject name
+        :type subject_name: string
+        :return: the added subject {"uuid1": "name1"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_subject(self, extension_uuid, subject_uuid):
+        """Remove a subject
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_uuid: Subject UUID
+        :type subject_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for objects
+
+    def get_object_dict(self, extension_uuid):
+        """Get the list of object for that IntraExtension
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: a dictionary containing all objects for that IntraExtension, eg. {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_object_dict(self, extension_uuid, object_dict):
+        """Set the list of object for that IntraExtension
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_dict: dict of object: {"uuid1": "name1", "uuid2": "name2"}
+        :type object_dict: dict
+        :return: a dictionary containing all objects for that IntraExtension, eg. {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_object(self, extension_uuid, object_uuid, object_name):
+        """Ad an object
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_uuid: Object UUID
+        :type object_uuid: string
+        :param object_name: Object name
+        :type object_name: string
+        :return: the added object {"uuid1": "name1"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_object(self, extension_uuid, object_uuid):
+        """Remove an object
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_uuid: Object UUID
+        :type object_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for actions
+
+    def get_action_dict(self, extension_uuid):
+        """ Get the list of action for that IntraExtension
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: a dictionary containing all actions for that IntraExtension, eg. {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_action_dict(self, extension_uuid, action_dict):
+        """ Set the list of action for that IntraExtension
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_dict: dict of actions: {"uuid1": "name1", "uuid2": "name2"}
+        :type action_dict: dict
+        :return: a dictionary containing all actions for that IntraExtension, eg. {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_action(self, extension_uuid, action_uuid, action_name):
+        """Ad an action
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_uuid: Action UUID
+        :type action_uuid: string
+        :param action_name: Action name
+        :type action_name: string
+        :return: the added action {"uuid1": "name1"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_action(self, extension_uuid, action_uuid):
+        """Remove an action
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_uuid: Action UUID
+        :type action_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for subject_category
+
+    def get_subject_category_dict(self, extension_uuid):
+        """Get a list of all subject categories
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: a dictionary containing all subject categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_subject_category_dict(self, extension_uuid, subject_categories):
+        """Set the list of all subject categories
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_categories: dict of subject categories {"uuid1": "name1", "uuid2": "name2"}
+        :type subject_categories: dict
+        :return: a dictionary containing all subject categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_subject_category_dict(self, extension_uuid, subject_category_uuid, subject_category_name):
+        """Add a subject category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_category_uuid: the UUID of the subject category
+        :type subject_category_uuid: string
+        :param subject_category_name: the name of the subject category
+        :type subject_category_name: string
+        :return: a dictionary with the added subject category {"uuid1": "name1"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_subject_category(self, extension_uuid, subject_category_uuid):
+        """Remove one subject category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_category_uuid: the UUID of subject category to remove
+        :type subject_category_uuid: string
+        :return: a dictionary containing all subject categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for object_category
+
+    def get_object_category_dict(self, extension_uuid):
+        """Get a list of all object categories
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: a dictionary containing all object categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_object_category_dict(self, extension_uuid, object_categories):
+        """Set the list of all object categories
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_categories: dict of object categories {"uuid1": "name1", "uuid2": "name2"}
+        :type object_categories: dict
+        :return: a dictionary containing all object categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_object_category_dict(self, extension_uuid, object_category_uuid, object_category_name):
+        """Add a object category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_category_uuid: the UUID of the object category
+        :type object_category_uuid: string
+        :param object_category_name: the name of the object category
+        :type object_category_name: string
+        :return: a dictionary with the added object category {"uuid1": "name1"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_object_category(self, extension_uuid, object_category_uuid):
+        """Remove one object category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_category_uuid: the UUID of object category to remove
+        :type object_category_uuid: string
+        :return: a dictionary containing all object categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for action_category
+
+    def get_action_category_dict(self, extension_uuid):
+        """Get a list of all action categories
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: a dictionary containing all action categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_action_category_dict(self, extension_uuid, action_categories):
+        """Set the list of all action categories
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_categories: dict of action categories {"uuid1": "name1", "uuid2": "name2"}
+        :type action_categories: dict
+        :return: a dictionary containing all action categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_action_category_dict(self, extension_uuid, action_category_uuid, action_category_name):
+        """Add a action category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_category_uuid: the UUID of the action category
+        :type action_category_uuid: string
+        :param action_category_name: the name of the action category
+        :type action_category_name: string
+        :return: a dictionary with the added action category {"uuid1": "name1"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_action_category(self, extension_uuid, action_category_uuid):
+        """Remove one action category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_category_uuid: the UUID of action category to remove
+        :type action_category_uuid: string
+        :return: a dictionary containing all action categories {"uuid1": "name1", "uuid2": "name2"}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for subject_category_value_scope
+
+    def get_subject_category_scope_dict(self, extension_uuid, category):
+        """Get a list of all subject category scope
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param category: the category UUID where the scope values are
+        :type category: string
+        :return: a dictionary containing all subject category scope {"category1": {"scope_uuid1": "scope_name1}}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_subject_category_scope_dict(self, extension_uuid, subject_category, scope):
+        """Set the list of all scope for that subject category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_category: the UUID of the subject category where this scope will be set
+        :type subject_category: string
+        :return: a dictionary containing all scope {"scope_uuid1": "scope_name1, "scope_uuid2": "scope_name2}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_subject_category_scope_dict(self, extension_uuid, subject_category, scope_uuid, scope_name):
+        """Add a subject category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_category: the subject category UUID where the scope will be added
+        :type subject_category: string
+        :param scope_uuid: the UUID of the subject category
+        :type scope_uuid: string
+        :param scope_name: the name of the subject category
+        :type scope_name: string
+        :return: a dictionary containing the subject category scope added {"category1": {"scope_uuid1": "scope_name1}}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_subject_category_scope_dict(self, extension_uuid, subject_category, scope_uuid):
+        """Remove one scope belonging to a subject category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_category: the UUID of the subject category containing the scope to remove
+        :type subject_category: string
+        :param scope_uuid: the UUID of the scope to remove
+        :type scope_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for object_category_scope
+
+    def get_object_category_scope_dict(self, extension_uuid, category):
+        """Get a list of all object category scope
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param category: the category UUID where the scope values are
+        :type category: string
+        :return: a dictionary containing all object category scope {"category1": {"scope_uuid1": "scope_name1}}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_object_category_scope_dict(self, extension_uuid, object_category, scope):
+        """Set the list of all scope for that object category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_category: the UUID of the object category where this scope will be set
+        :type object_category: string
+        :return: a dictionary containing all scope {"scope_uuid1": "scope_name1, "scope_uuid2": "scope_name2}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_object_category_scope_dict(self, extension_uuid, object_category, scope_uuid, scope_name):
+        """Add a object category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_category: the object category UUID where the scope will be added
+        :type object_category: string
+        :param scope_uuid: the UUID of the object category
+        :type scope_uuid: string
+        :param scope_name: the name of the object category
+        :type scope_name: string
+        :return: a dictionary containing the object category scope added {"category1": {"scope_uuid1": "scope_name1}}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_object_category_scope_dict(self, extension_uuid, object_category, scope_uuid):
+        """Remove one scope belonging to a object category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_category: the UUID of object categorywhere we can find the scope to remove
+        :type object_category: string
+        :param scope_uuid: the UUID of the scope to remove
+        :type scope_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for action_category_scope
+
+    def get_action_category_scope_dict(self, extension_uuid, category):
+        """Get a list of all action category scope
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param category: the category UUID where the scope values are
+        :type category: string
+        :return: a dictionary containing all action category scope {"category1": {"scope_uuid1": "scope_name1}}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_action_category_scope_dict(self, extension_uuid, action_category, scope):
+        """Set the list of all scope for that action category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_category: the UUID of the action category where this scope will be set
+        :type action_category: string
+        :return: a dictionary containing all scope {"scope_uuid1": "scope_name1, "scope_uuid2": "scope_name2}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_action_category_scope_dict(self, extension_uuid, action_category, scope_uuid, scope_name):
+        """Add a action category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_category: the action category UUID where the scope will be added
+        :type action_category: string
+        :param scope_uuid: the UUID of the action category
+        :type scope_uuid: string
+        :param scope_name: the name of the action category
+        :type scope_name: string
+        :return: a dictionary containing the action category scope added {"category1": {"scope_uuid1": "scope_name1}}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_action_category_scope_dict(self, extension_uuid, action_category, scope_uuid):
+        """Remove one scope belonging to a action category
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_category: the UUID of action categorywhere we can find the scope to remove
+        :type action_category: string
+        :param scope_uuid: the UUID of the scope to remove
+        :type scope_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for subject_category_assignment
+
+    def get_subject_category_assignment_dict(self, extension_uuid, subject_uuid):
+        """Get the assignment for a given subject_uuid
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_uuid: subject UUID
+        :type subject_uuid: string
+        :return: a dictionary of assignment for the given subject {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_subject_category_assignment_dict(self, extension_uuid, subject_uuid, assignment_dict):
+        """Set the assignment for a given subject_uuid
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_uuid: subject UUID
+        :type subject_uuid: string
+        :param assignment_dict: the assignment dictionary {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        :type assignment_dict: dict
+        :return: a dictionary of assignment for the given subject {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_subject_category_assignment_dict(self, extension_uuid, subject_uuid, category_uuid, scope_uuid):
+        """Add a scope to a category and to a subject
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_uuid: the subject UUID
+        :type subject_uuid: string
+        :param category_uuid: the category UUID
+        :type category_uuid: string
+        :param scope_uuid: the scope UUID
+        :type scope_uuid: string
+        :return: a dictionary of assignment for the given subject {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_subject_category_assignment(self, extension_uuid, subject_uuid, category_uuid, scope_uuid):
+        """Remove a scope from a category and from a subject
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param subject_uuid: the subject UUID
+        :type subject_uuid: string
+        :param category_uuid: the category UUID
+        :type category_uuid: string
+        :param scope_uuid: the scope UUID
+        :type scope_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for object_category_assignment
+
+    def get_object_category_assignment_dict(self, extension_uuid, object_uuid):
+        """Get the assignment for a given object_uuid
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_uuid: object UUID
+        :type object_uuid: string
+        :return: a dictionary of assignment for the given object {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_object_category_assignment_dict(self, extension_uuid, object_uuid, assignment_dict):
+        """Set the assignment for a given object_uuid
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_uuid: object UUID
+        :type object_uuid: string
+        :param assignment_dict: the assignment dictionary {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        :type assignment_dict: dict
+        :return: a dictionary of assignment for the given object {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_object_category_assignment_dict(self, extension_uuid, object_uuid, category_uuid, scope_uuid):
+        """Add a scope to a category and to a object
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_uuid: the object UUID
+        :type object_uuid: string
+        :param category_uuid: the category UUID
+        :type category_uuid: string
+        :param scope_uuid: the scope UUID
+        :type scope_uuid: string
+        :return: a dictionary of assignment for the given object {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_object_category_assignment(self, extension_uuid, object_uuid, category_uuid, scope_uuid):
+        """Remove a scope from a category and from a object
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param object_uuid: the object UUID
+        :type object_uuid: string
+        :param category_uuid: the category UUID
+        :type category_uuid: string
+        :param scope_uuid: the scope UUID
+        :type scope_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for action_category_assignment
+
+    def get_action_category_assignment_dict(self, extension_uuid, action_uuid):
+        """Get the assignment for a given action_uuid
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_uuid: action UUID
+        :type action_uuid: string
+        :return: a dictionary of assignment for the given action {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_action_category_assignment_dict(self, extension_uuid, action_uuid, assignment_dict):
+        """Set the assignment for a given action_uuid
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_uuid: action UUID
+        :type action_uuid: string
+        :param assignment_dict: the assignment dictionary {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        :type assignment_dict: dict
+        :return: a dictionary of assignment for the given action {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def add_action_category_assignment_dict(self, extension_uuid, action_uuid, category_uuid, scope_uuid):
+        """Add a scope to a category and to a action
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_uuid: the action UUID
+        :type action_uuid: string
+        :param category_uuid: the category UUID
+        :type category_uuid: string
+        :param scope_uuid: the scope UUID
+        :type scope_uuid: string
+        :return: a dictionary of assignment for the given action {"cat1": ["scope_uuid1", "scope_uuid2"]}
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def remove_action_category_assignment(self, extension_uuid, action_uuid, category_uuid, scope_uuid):
+        """Remove a scope from a category and from a action
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param action_uuid: the action UUID
+        :type action_uuid: string
+        :param category_uuid: the category UUID
+        :type category_uuid: string
+        :param scope_uuid: the scope UUID
+        :type scope_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for meta_rule
+
+    def get_meta_rule_dict(self, extension_uuid):
+        """Get the Meta rule
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: a dictionary containing the meta_rule
+
+        Here is an example of a meta_rule:
+        {
+          "sub_meta_rules": {
+            "relation_super": {
+              "subject_categories": ["role"],
+              "action_categories": ["computing_action"],
+              "object_categories": ["id"],
+              "relation": "relation_super"
+            }
+          },
+          "aggregation": "and_true_aggregation"
+        }
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_meta_rule_dict(self, extension_uuid, meta_rule):
+        """Set the Meta rule
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param meta_rule: a dictionary representing the meta_rule (see below)
+        :return:a dictionary containing the meta_rule
+
+        Here is an example of a meta_rule:
+        {
+          "sub_meta_rules": {
+            "relation_super": {
+              "subject_categories": ["role"],
+              "action_categories": ["computing_action"],
+              "object_categories": ["id"],
+              "relation": "relation_super"
+            }
+          },
+          "aggregation": "and_true_aggregation"
+        }
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for rules
+
+    def get_rules(self, extension_uuid):
+        """Get all rules
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: a dictionary containing rules, e.g.
+        {
+            "relation_super": [
+            ["admin", "vm_admin", "servers", True],
+            ["admin", "vm_access", "servers", True]
+            ]
+        }
+        All items are UUIDs except the last one.
+        The final boolean is the positive/negative flag: if True, a request that conforms
+        to the rule is authorized; if False, it is rejected.
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_rules(self, extension_uuid, rules):
+        """Set all rules
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param rules: a dictionary containing rules (see below)
+        :type rules: dict
+        :return: a dictionary containing rules, e.g.
+        {
+            "relation_super": [
+            ["admin", "vm_admin", "servers", True],
+            ["admin", "vm_access", "servers", True]
+            ]
+        }
+        All items are UUIDs except the last one.
+        The final boolean is the positive/negative flag: if True, a request that conforms
+        to the rule is authorized; if False, it is rejected.
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # Getter and Setter for intra_extension
+
+    def get_intra_extension_list(self):
+        """Get a list of IntraExtension UUIDs
+
+        :return: a list of IntraExtension UUIDs ["uuid1", "uuid2"]
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_intra_extension_dict(self, extension_uuid):
+        """Get a description of an IntraExtension
+
+        :param extension_uuid: the UUID of the IntraExtension
+        :type extension_uuid: string
+        :return: a dictionary describing the IntraExtension (see set_intra_extension)
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def set_intra_extension(self, extension_uuid, extension_dict):
+        """Set a new IntraExtension
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :param extension_dict: a dictionary with the description of the IntraExtension (see below)
+        :type extension_dict: dict
+        :return: the IntraExtension dictionary, example:
+        {
+            "id": "uuid1",
+            "name": "Name of the intra_extension",
+            "model": "Model of the intra_extension (admin or authz)",
+            "description": "a description of the intra_extension"
+        }
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def delete_intra_extension(self, extension_uuid):
+        """Delete an IntraExtension
+
+        :param extension_uuid: IntraExtension UUID
+        :type extension_uuid: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_sub_meta_rule_relations(self, username, uuid):
+        # TODO: check which relations are really usable
+        return {"sub_meta_rule_relations": ["relation_super", "relation_test"]}
+
+
+class LogDriver(object):
+
+    def authz(self, message):
+        """Log authorization message
+
+        :param message: the message to log
+        :type message: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def debug(self, message):
+        """Log debug message
+
+        :param message: the message to log
+        :type message: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def info(self, message):
+        """Log informational message
+
+        :param message: the message to log
+        :type message: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def warning(self, message):
+        """Log warning message
+
+        :param message: the message to log
+        :type message: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def error(self, message):
+        """Log error message
+
+        :param message: the message to log
+        :type message: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def critical(self, message):
+        """Log critical message
+
+        :param message: the message to log
+        :type message: string
+        :return: None
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    def get_logs(self, options):
+        """Get logs
+
+        :param options: options to filter log events
+        :type options: string, e.g. "event_number=10,from=2014-01-01-10:10:10,to=2014-01-01-12:10:10,filter=expression"
+        :return: a list of log events
+
+        TIME_FORMAT is '%Y-%m-%d-%H:%M:%S'
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+# @dependency.provider('superextension_api')
+# class SuperExtensionManager(manager.Manager):
+#
+#     def __init__(self):
+#         driver = CONF.moon.superextension_driver
+#         super(SuperExtensionManager, self).__init__(driver)
+#
+#     def authz(self, sub, obj, act):
+#         #return self.driver.admin(sub, obj, act)
+#         return True
+
+
+# @dependency.provider('interextension_api')
+# @dependency.requires('identity_api')
+# class InterExtensionManager(manager.Manager):
+#
+#     def __init__(self):
+#         driver = CONF.moon.interextension_driver
+#         super(InterExtensionManager, self).__init__(driver)
+#
+#     def check_inter_extension(self, uuid):
+#         if uuid not in self.get_inter_extensions():
+#             LOG.error("Unknown InterExtension {}".format(uuid))
+#             raise exception.NotFound("InterExtension not found.")
+#
+#     def get_inter_extensions(self):
+#         return self.driver.get_inter_extensions()
+#
+#     def get_inter_extension(self, uuid):
+#         return self.driver.get_inter_extension(uuid)
+#
+#     def create_inter_extension(self, inter_extension):
+#         ie = dict()
+#         ie['id'] = uuid4().hex
+#         ie["requesting_intra_extension_uuid"] = filter_input(inter_extension["requesting_intra_extension_uuid"])
+#         ie["requested_intra_extension_uuid"] = filter_input(inter_extension["requested_intra_extension_uuid"])
+#         ie["description"] = filter_input(inter_extension["description"])
+#         ie["virtual_entity_uuid"] = filter_input(inter_extension["virtual_entity_uuid"])
+#         ie["genre"] = filter_input(inter_extension["genre"])
+#
+#         ref = self.driver.create_inter_extensions(ie['id'], ie)
+#         return ref
+#
+#     def delete_inter_extension(self, inter_extension_id):
+#         LOG.error("Deleting {}".format(inter_extension_id))
+#         ref = self.driver.delete_inter_extensions(inter_extension_id)
+#         return ref
+#
+#
+# class SuperExtensionDriver(object):
+#
+#     def __init__(self):
+#         self.__super_extension = None
+#
+#     def admin(self, sub, obj, act):
+#         return self.__super_extension.authz(sub, obj, act)
+#
+#     def delegate(self, delegating_uuid, delegated_uuid, privilege):  # TODO later
+#         pass
+#
+#     # Getter and Setter for SuperExtensions
+#
+#     def get_super_extensions(self):
+#         raise exception.NotImplemented()  # pragma: no cover
+#
+#     def create_super_extensions(self, super_id, super_extension):
+#         raise exception.NotImplemented()  # pragma: no cover
+#
+#
+# class InterExtensionDriver(object):
+#
+#     # Getter and Setter for InterExtensions
+#
+#     def get_inter_extensions(self):
+#         raise exception.NotImplemented()  # pragma: no cover
+#
+#     def get_inter_extension(self, uuid):
+#         raise exception.NotImplemented()  # pragma: no cover
+#
+#     def create_inter_extensions(self, intra_id, intra_extension):
+#         raise exception.NotImplemented()  # pragma: no cover
+#
+#     def delete_inter_extensions(self, intra_extension_id):
+#         raise exception.NotImplemented()  # pragma: no cover
+#
+#
+# class VirtualEntityDriver(object):
+#
+#     # Getter and Setter for InterExtensions
+#
+#     def get_virtual_entities(self):
+#         raise exception.NotImplemented()  # pragma: no cover
+#
+#     def create_virtual_entities(self, ve_id, virtual_entity):
+#         raise exception.NotImplemented()  # pragma: no cover
+
diff --git a/keystone-moon/keystone/contrib/moon/exception.py b/keystone-moon/keystone/contrib/moon/exception.py
new file mode 100644 (file)
index 0000000..20a7d73
--- /dev/null
@@ -0,0 +1,112 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+from keystone.common import dependency
+from keystone.exception import Error
+from keystone.i18n import _, _LW
+
+@dependency.requires('moonlog_api')
+class TenantError(Error):
+    message_format = _("There is an error requesting this tenant"
+                       " the server could not comply with the request"
+                       " since it is either malformed or otherwise"
+                       " incorrect. The client is assumed to be in error.")
+    code = 400
+    title = 'Tenant Error'
+    logger = "ERROR"
+
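+    # Each subclass selects a log severity via the ``logger`` attribute;
+    # the message is routed to the matching moonlog_api call when the
+    # exception object is destroyed.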
+    def __del__(self):
+        if self.logger == "ERROR":
+            self.moonlog_api.error(self.message_format)
+        elif self.logger == "WARNING":
+            self.moonlog_api.warning(self.message_format)
+        elif self.logger == "CRITICAL":
+            self.moonlog_api.critical(self.message_format)
+        elif self.logger == "AUTHZ":
+            self.moonlog_api.authz(self.message_format)
+            self.moonlog_api.error(self.message_format)
+        else:
+            self.moonlog_api.info(self.message_format)
+
+
+class TenantListEmptyError(TenantError):
+    message_format = _("The tenant list mapping is empty, you must set the mapping first.")
+    code = 400
+    title = 'Tenant List Empty Error'
+
+
+class TenantNotFoundError(TenantError):
+    message_format = _("The tenant UUID was not found.")
+    code = 400
+    title = 'Tenant UUID Not Found Error'
+
+
+class IntraExtensionError(TenantError):
+    message_format = _("There is an error requesting this IntraExtension.")
+    code = 400
+    title = 'Extension Error'
+
+
+class CategoryNotFound(IntraExtensionError):
+    message_format = _("The category is unknown.")
+    code = 400
+    title = 'Extension Error'
+    logger = "WARNING"
+
+
+class IntraExtensionUnMapped(TenantError):
+    message_format = _("The Extension is not mapped to a tenant.")
+    code = 400
+    title = 'Extension UUID Not Found Error'
+    logger = "WARNING"
+
+
+class IntraExtensionNotFound(IntraExtensionError):
+    message_format = _("The Extension for that tenant is unknown.")
+    code = 400
+    title = 'Extension UUID Not Found Error'
+    logger = "WARNING"
+
+
+class IntraExtensionNotAuthorized(IntraExtensionError):
+    message_format = _("User has no authorization for that action.")
+    code = 400
+    title = 'Authorization Error'
+    logger = "AUTHZ"
+
+
+class AdminIntraExtensionNotFound(IntraExtensionNotFound):
+    message_format = _("The admin Extension for that tenant is unknown.")
+    code = 400
+    title = 'Admin Extension UUID Not Found Error'
+    logger = "WARNING"
+
+
+class AdminIntraExtensionCreationError(IntraExtensionError):
+    message_format = _("The arguments for the creation of this admin Extension were malformed.")
+    code = 400
+    title = 'Admin Extension Creation Error'
+
+
+class AdminIntraExtensionModificationNotAuthorized(IntraExtensionError):
+    message_format = _("The modification of this admin Extension is not authorizaed.")
+    code = 400
+    title = 'Admin Extension Creation Error'
+    logger = "AUTHZ"
+
+
+class AuthIntraExtensionModificationNotAuthorized(IntraExtensionError):
+    message_format = _("The modification of this authz Extension is not authorizaed.")
+    code = 400
+    title = 'Authz Extension Creation Error'
+    logger = "AUTHZ"
+
+
+class AuthzIntraExtensionNotFound(IntraExtensionNotFound):
+    message_format = _("The authz Extension for that tenant is unknown.")
+    code = 400
+    title = 'Authz Extension UUID Not Found Error'
+    logger = "WARNING"
+
diff --git a/keystone-moon/keystone/contrib/moon/extension.py b/keystone-moon/keystone/contrib/moon/extension.py
new file mode 100644 (file)
index 0000000..efee55c
--- /dev/null
@@ -0,0 +1,740 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import os.path
+import copy
+import json
+import itertools
+from uuid import uuid4
+import logging
+
+LOG = logging.getLogger("moon.authz")
+
+
+class Metadata:
+
+    def __init__(self):
+        self.__name = ''
+        self.__model = ''
+        self.__genre = ''
+        self.__description = ''
+        self.__subject_categories = list()
+        self.__object_categories = list()
+        self.__meta_rule = dict()
+        self.__meta_rule['sub_meta_rules'] = list()
+        self.__meta_rule['aggregation'] = ''
+
+    def load_from_json(self, extension_setting_dir):
+        metadata_path = os.path.join(extension_setting_dir, 'metadata.json')
+        # use a context manager so the file handle is closed after loading
+        with open(metadata_path) as f:
+            json_metadata = json.load(f)
+        self.__name = json_metadata['name']
+        self.__model = json_metadata['model']
+        self.__genre = json_metadata['genre']
+        self.__description = json_metadata['description']
+        self.__subject_categories = copy.deepcopy(json_metadata['subject_categories'])
+        self.__object_categories = copy.deepcopy(json_metadata['object_categories'])
+        self.__meta_rule = copy.deepcopy(json_metadata['meta_rule'])
+
+    def get_name(self):
+        return self.__name
+
+    def get_genre(self):
+        return self.__genre
+
+    def get_model(self):
+        return self.__model
+
+    def get_subject_categories(self):
+        return self.__subject_categories
+
+    def get_object_categories(self):
+        return self.__object_categories
+
+    def get_meta_rule(self):
+        return self.__meta_rule
+
+    def get_meta_rule_aggregation(self):
+        return self.__meta_rule['aggregation']
+
+    def get_data(self):
+        data = dict()
+        data["name"] = self.get_name()
+        data["model"] = self.__model
+        data["genre"] = self.__genre
+        data["description"] = self.__description
+        data["subject_categories"] = self.get_subject_categories()
+        data["object_categories"] = self.get_object_categories()
+        data["meta_rule"] = dict(self.get_meta_rule())
+        return data
+
+    def set_data(self, data):
+        self.__name = data["name"]
+        self.__model = data["model"]
+        self.__genre = data["genre"]
+        self.__description = data["description"]
+        self.__subject_categories = list(data["subject_categories"])
+        self.__object_categories = list(data["object_categories"])
+        self.__meta_rule = dict(data["meta_rule"])
+
+
+class Configuration:
+    def __init__(self):
+        self.__subject_category_values = dict()
+        # examples: { "role": {"admin", "dev", }, }
+        self.__object_category_values = dict()
+        self.__rules = list()
+
+    def load_from_json(self, extension_setting_dir):
+        configuration_path = os.path.join(extension_setting_dir, 'configuration.json')
+        # use a context manager so the file handle is closed after loading
+        with open(configuration_path) as f:
+            json_configuration = json.load(f)
+        self.__subject_category_values = copy.deepcopy(json_configuration['subject_category_values'])
+        self.__object_category_values = copy.deepcopy(json_configuration['object_category_values'])
+        self.__rules = copy.deepcopy(json_configuration['rules'])  # TODO: currently a list, will be a dict with sub-meta-rule as key
+
+    def get_subject_category_values(self):
+        return self.__subject_category_values
+
+    def get_object_category_values(self):
+        return self.__object_category_values
+
+    def get_rules(self):
+        return self.__rules
+
+    def get_data(self):
+        data = dict()
+        data["subject_category_values"] = self.get_subject_category_values()
+        data["object_category_values"] = self.get_object_category_values()
+        data["rules"] = self.get_rules()
+        return data
+
+    def set_data(self, data):
+        # category values are dicts (see __init__/load_from_json); rules is a list
+        self.__subject_category_values = dict(data["subject_category_values"])
+        self.__object_category_values = dict(data["object_category_values"])
+        self.__rules = list(data["rules"])
+
+
+class Perimeter:
+    def __init__(self):
+        self.__subjects = list()
+        self.__objects = list()
+
+    def load_from_json(self, extension_setting_dir):
+        perimeter_path = os.path.join(extension_setting_dir, 'perimeter.json')
+        # use a context manager so the file handle is closed after loading
+        with open(perimeter_path) as f:
+            json_perimeter = json.load(f)
+        self.__subjects = copy.deepcopy(json_perimeter['subjects'])
+        self.__objects = copy.deepcopy(json_perimeter['objects'])
+        # print(self.__subjects)
+        # print(self.__objects)
+
+    def get_subjects(self):
+        return self.__subjects
+
+    def get_objects(self):
+        return self.__objects
+
+    def get_data(self):
+        data = dict()
+        data["subjects"] = self.get_subjects()
+        data["objects"] = self.get_objects()
+        return data
+
+    def set_data(self, data):
+        self.__subjects = list(data["subjects"])
+        self.__objects = list(data["objects"])
+
+
+class Assignment:
+    def __init__(self):
+        self.__subject_category_assignments = dict()
+        # examples: { "role": {"user1": {"dev"}, "user2": {"admin",}}, }  TODO: limit to one value for each attr
+        self.__object_category_assignments = dict()
+
+    def load_from_json(self, extension_setting_dir):
+        assignment_path = os.path.join(extension_setting_dir, 'assignment.json')
+        # use a context manager so the file handle is closed after loading
+        with open(assignment_path) as f:
+            json_assignment = json.load(f)
+
+        self.__subject_category_assignments = dict(copy.deepcopy(json_assignment['subject_category_assignments']))
+        self.__object_category_assignments = dict(copy.deepcopy(json_assignment['object_category_assignments']))
+
+    def get_subject_category_assignments(self):
+        return self.__subject_category_assignments
+
+    def get_object_category_assignments(self):
+        return self.__object_category_assignments
+
+    def get_data(self):
+        data = dict()
+        data["subject_category_assignments"] = self.get_subject_category_assignments()
+        data["object_category_assignments"] = self.get_object_category_assignments()
+        return data
+
+    def set_data(self, data):
+        # assignments are dicts keyed by category (see load_from_json)
+        self.__subject_category_assignments = dict(data["subject_category_assignments"])
+        self.__object_category_assignments = dict(data["object_category_assignments"])
+
+
+class AuthzData:
+    def __init__(self, sub, obj, act):
+        self.validation = "False"  # "OK, KO, Out of Scope"  # "auth": False,
+        self.subject = sub
+        self.object = str(obj)
+        self.action = str(act)
+        self.type = ""  # intra-tenant, inter-tenant, Out of Scope
+        self.subject_attrs = dict()
+        self.object_attrs = dict()
+        self.requesting_tenant = ""  # "subject_tenant": subject_tenant,
+        self.requested_tenant = ""  # "object_tenant": object_tenant,
+
+    def __str__(self):
+        return """AuthzData:
+        validation={}
+        subject={}
+        object={}
+        action={}
+        """.format(self.validation, self.subject, self.object, self.action)
+
+
+class Extension:
+    def __init__(self):
+        self.metadata = Metadata()
+        self.configuration = Configuration()
+        self.perimeter = Perimeter()
+        self.assignment = Assignment()
+
+    def load_from_json(self, extension_setting_dir):
+        self.metadata.load_from_json(extension_setting_dir)
+        self.configuration.load_from_json(extension_setting_dir)
+        self.perimeter.load_from_json(extension_setting_dir)
+        self.assignment.load_from_json(extension_setting_dir)
+
+    def get_name(self):
+        return self.metadata.get_name()
+
+    def get_genre(self):
+        return self.metadata.get_genre()
+
+    def authz(self, sub, obj, act):
+        authz_data = AuthzData(sub, obj, act)
+
+        if authz_data.subject in self.perimeter.get_subjects() and authz_data.object in self.perimeter.get_objects():
+
+            # collect the subject's attribute values for every subject category
+            for subject_category in self.metadata.get_subject_categories():
+                authz_data.subject_attrs[subject_category] = copy.copy(
+                    self.assignment.get_subject_category_assignments()[subject_category][sub]
+                )
+
+            # collect the object's attribute values; the 'action' category is
+            # fed directly from the requested action
+            for object_category in self.metadata.get_object_categories():
+                if object_category == 'action':
+                    authz_data.object_attrs[object_category] = [act]
+                else:
+                    authz_data.object_attrs[object_category] = copy.copy(
+                        self.assignment.get_object_category_assignments()[object_category][obj]
+                    )
+
+            _aggregation_data = dict()
+
+            for sub_meta_rule in self.metadata.get_meta_rule()["sub_meta_rules"].values():
+                _tmp_relation_args = list()
+
+                for sub_subject_category in sub_meta_rule["subject_categories"]:
+                    _tmp_relation_args.append(authz_data.subject_attrs[sub_subject_category])
+
+                for sub_object_category in sub_meta_rule["object_categories"]:
+                    _tmp_relation_args.append(authz_data.object_attrs[sub_object_category])
+
+                _relation_args = list(itertools.product(*_tmp_relation_args))
+
+                if sub_meta_rule['relation'] == 'relation_super':  # TODO: replace with a Prolog engine
+                    _aggregation_data['relation_super'] = dict()
+                    _aggregation_data['relation_super']['result'] = False
+                    for _relation_arg in _relation_args:
+                        if list(_relation_arg) in self.configuration.get_rules()[sub_meta_rule['relation']]:
+                            _aggregation_data['relation_super']['result'] = True
+                            break
+                    _aggregation_data['relation_super']['status'] = 'finished'
+
+                elif sub_meta_rule['relation'] == 'permission':
+                    _aggregation_data['permission'] = dict()
+                    _aggregation_data['permission']['result'] = False
+                    for _relation_arg in _relation_args:
+                        if list(_relation_arg) in self.configuration.get_rules()[sub_meta_rule['relation']]:
+                            _aggregation_data['permission']['result'] = True
+                            break
+                    _aggregation_data['permission']['status'] = 'finished'
+
+            # 'and_true_aggregation': every evaluated sub meta rule must hold
+            if self.metadata.get_meta_rule_aggregation() == 'and_true_aggregation':
+                authz_data.validation = "OK"
+                for relation in _aggregation_data:
+                    if _aggregation_data[relation]['status'] == 'finished' \
+                            and _aggregation_data[relation]['result'] is False:
+                        authz_data.validation = "KO"
+        else:
+            authz_data.validation = 'Out of Scope'
+
+        return authz_data.validation
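+    # A minimal usage sketch for authz(), assuming an extension whose settings
+    # were loaded from JSON; the subject/object/action names below are
+    # hypothetical examples, not values shipped with this package:
+    #
+    #     ext = Extension()
+    #     ext.load_from_json('/path/to/extension_setting_dir')
+    #     decision = ext.authz('user1', 'vm1', 'read')
+    #     assert decision in ('OK', 'KO', 'Out of Scope')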
+
+    # ---------------- metadata api ----------------
+
+    def get_subject_categories(self):
+        return self.metadata.get_subject_categories()
+
+    def add_subject_category(self, category_id):
+        if category_id in self.get_subject_categories():
+            return "[ERROR] Add Subject Category: Subject Category Exists"
+        else:
+            self.get_subject_categories().append(category_id)
+            self.configuration.get_subject_category_values()[category_id] = list()
+            self.assignment.get_subject_category_assignments()[category_id] = dict()
+            return self.get_subject_categories()
+
+    def del_subject_category(self, category_id):
+        if category_id in self.get_subject_categories():
+            self.configuration.get_subject_category_values().pop(category_id)
+            self.assignment.get_subject_category_assignments().pop(category_id)
+            self.get_subject_categories().remove(category_id)
+            return self.get_subject_categories()
+        else:
+            return "[ERROR] Del Subject Category: Subject Category Unknown"
+
+    def get_object_categories(self):
+        return self.metadata.get_object_categories()
+
+    def add_object_category(self, category_id):
+        if category_id in self.get_object_categories():
+            return "[ERROR] Add Object Category: Object Category Exists"
+        else:
+            self.get_object_categories().append(category_id)
+            self.configuration.get_object_category_values()[category_id] = list()
+            self.assignment.get_object_category_assignments()[category_id] = dict()
+            return self.get_object_categories()
+
+    def del_object_category(self, category_id):
+        if category_id in self.get_object_categories():
+            self.configuration.get_object_category_values().pop(category_id)
+            self.assignment.get_object_category_assignments().pop(category_id)
+            self.get_object_categories().remove(category_id)
+            return self.get_object_categories()
+        else:
+            return "[ERROR] Del Object Category: Object Category Unknown"
+
+    def get_meta_rule(self):
+        return self.metadata.get_meta_rule()
+
+    # ---------------- configuration api ----------------
+
+    def get_subject_category_values(self, category_id):
+        return self.configuration.get_subject_category_values()[category_id]
+
+    def add_subject_category_value(self, category_id, category_value):
+        if category_value in self.configuration.get_subject_category_values()[category_id]:
+            return "[ERROR] Add Subject Category Value: Subject Category Value Exists"
+        else:
+            self.configuration.get_subject_category_values()[category_id].append(category_value)
+            return self.configuration.get_subject_category_values()[category_id]
+
+    def del_subject_category_value(self, category_id, category_value):
+        if category_value in self.configuration.get_subject_category_values()[category_id]:
+            self.configuration.get_subject_category_values()[category_id].remove(category_value)
+            return self.configuration.get_subject_category_values()[category_id]
+        else:
+            return "[ERROR] Del Subject Category Value: Subject Category Value Unknown"
+
+    def get_object_category_values(self, category_id):
+        return self.configuration.get_object_category_values()[category_id]
+
+    def add_object_category_value(self, category_id, category_value):
+        if category_value in self.configuration.get_object_category_values()[category_id]:
+            return "[ERROR] Add Object Category Value: Object Category Value Exists"
+        else:
+            self.configuration.get_object_category_values()[category_id].append(category_value)
+            return self.configuration.get_object_category_values()[category_id]
+
+    def del_object_category_value(self, category_id, category_value):
+        if category_value in self.configuration.get_object_category_values()[category_id]:
+            self.configuration.get_object_category_values()[category_id].remove(category_value)
+            return self.configuration.get_object_category_values()[category_id]
+        else:
+            return "[ERROR] Del Object Category Value: Object Category Value Unknown"
+
+    def get_meta_rules(self):
+        return self.metadata.get_meta_rule()
+
+    def _build_rule_from_list(self, relation, rule):
+        rule = list(rule)
+        _rule = dict()
+        _rule["sub_cat_value"] = dict()
+        _rule["obj_cat_value"] = dict()
+        if relation in self.metadata.get_meta_rule()["sub_meta_rules"]:
+            _rule["sub_cat_value"][relation] = dict()
+            _rule["obj_cat_value"][relation] = dict()
+            for s_category in self.metadata.get_meta_rule()["sub_meta_rules"][relation]["subject_categories"]:
+                _rule["sub_cat_value"][relation][s_category] = rule.pop(0)
+            for o_category in self.metadata.get_meta_rule()["sub_meta_rules"][relation]["object_categories"]:
+                _rule["obj_cat_value"][relation][o_category] = rule.pop(0)
+        return _rule
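+
+    # For illustration (names are hypothetical): assuming a sub meta rule
+    # "relation_super" with subject_categories ["subject_security_level"] and
+    # object_categories ["object_security_level", "action"], a flat rule such
+    # as ["high", "medium", "read"] would be rebuilt as:
+    #
+    #     {"sub_cat_value": {"relation_super": {"subject_security_level": "high"}},
+    #      "obj_cat_value": {"relation_super": {"object_security_level": "medium",
+    #                                           "action": "read"}}}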
+
+    def get_rules(self, full=False):
+        if not full:
+            return self.configuration.get_rules()
+        rules = dict()
+        for key in self.configuration.get_rules():
+            rules[key] = [self._build_rule_from_list(key, rule)
+                          for rule in self.configuration.get_rules()[key]]
+        return rules
+
+    def add_rule(self, sub_cat_value_dict, obj_cat_value_dict):
+        for _relation in self.metadata.get_meta_rule()["sub_meta_rules"]:
+            _sub_rule = list()
+            for sub_subject_category in self.metadata.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
+                try:
+                    if sub_cat_value_dict[_relation][sub_subject_category] \
+                            in self.configuration.get_subject_category_values()[sub_subject_category]:
+                        _sub_rule.append(sub_cat_value_dict[_relation][sub_subject_category])
+                    else:
+                        return "[Error] Add Rule: Subject Category Value Unknown"
+                except KeyError as e:
+                    # DThom: the relation attribute is sometimes missing; cause unknown
+                    print(e)
+
+            # BUG: adding a rule can raise a KeyError when a new category was
+            # previously added; example data and traceback:
+            # data = {
+            #     "sub_cat_value":
+            #         {"relation_super":
+            #              {"subject_security_level": "high", "AMH_CAT": "AMH_VAL"}
+            #         },
+            #     "obj_cat_value":
+            #         {"relation_super":
+            #              {"object_security_level": "medium"}
+            #         }
+            # }
+            # traceback = """
+            # Traceback (most recent call last):
+            #   File "/moon/gui/views_json.py", line 20, in wrapped
+            #     result = function(*args, **kwargs)
+            #   File "/moon/gui/views_json.py", line 429, in rules
+            #     obj_cat_value=filter_input(data["obj_cat_value"]))
+            #   File "/usr/local/lib/python2.7/dist-packages/moon/core/pap/core.py", line 380, in add_rule
+            #     obj_cat_value)
+            #   File "/usr/local/lib/python2.7/dist-packages/moon/core/pdp/extension.py", line 414, in add_rule
+            #     if obj_cat_value_dict[_relation][sub_object_category] \
+            # KeyError: u'action'
+            # """
+            for sub_object_category in self.metadata.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
+                if obj_cat_value_dict[_relation][sub_object_category] \
+                        in self.configuration.get_object_category_values()[sub_object_category]:
+                    _sub_rule.append(obj_cat_value_dict[_relation][sub_object_category])
+                else:
+                    return "[Error] Add Rule: Object Category Value Unknown"
+
+            if _sub_rule in self.configuration.get_rules()[_relation]:
+                return "[Error] Add Rule: Rule Exists"
+            else:
+                self.configuration.get_rules()[_relation].append(_sub_rule)
+                return {
+                    list(sub_cat_value_dict)[0]: ({
+                        "sub_cat_value": copy.deepcopy(sub_cat_value_dict),
+                        "obj_cat_value": copy.deepcopy(obj_cat_value_dict)
+                    }, )
+                }
+        return self.configuration.get_rules()
+
+    def del_rule(self, sub_cat_value_dict, obj_cat_value_dict):
+        for _relation in self.metadata.get_meta_rule()["sub_meta_rules"]:
+            _sub_rule = list()
+            for sub_subject_category in self.metadata.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
+                _sub_rule.append(sub_cat_value_dict[_relation][sub_subject_category])
+
+            for sub_object_category in self.metadata.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
+                _sub_rule.append(obj_cat_value_dict[_relation][sub_object_category])
+
+            if _sub_rule in self.configuration.get_rules()[_relation]:
+                self.configuration.get_rules()[_relation].remove(_sub_rule)
+            else:
+                return "[Error] Del Rule: Rule Unknown"
+        return self.configuration.get_rules()
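+
+    # A hedged example of the add_rule()/del_rule() payloads, mirroring the
+    # data shape documented in the BUG note above (values are hypothetical):
+    #
+    #     ext.add_rule(
+    #         {"relation_super": {"subject_security_level": "high"}},
+    #         {"relation_super": {"object_security_level": "medium"}})
+    #     ext.del_rule(
+    #         {"relation_super": {"subject_security_level": "high"}},
+    #         {"relation_super": {"object_security_level": "medium"}})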
+
+    # ---------------- perimeter api ----------------
+
+    def get_subjects(self):
+        return self.perimeter.get_subjects()
+
+    def get_objects(self):
+        return self.perimeter.get_objects()
+
+    def add_subject(self, subject_id):
+        if subject_id in self.perimeter.get_subjects():
+            return "[ERROR] Add Subject: Subject Exists"
+        else:
+            self.perimeter.get_subjects().append(subject_id)
+            return self.perimeter.get_subjects()
+
+    def del_subject(self, subject_id):
+        if subject_id in self.perimeter.get_subjects():
+            self.perimeter.get_subjects().remove(subject_id)
+            return self.perimeter.get_subjects()
+        else:
+            return "[ERROR] Del Subject: Subject Unknown"
+
+    def add_object(self, object_id):
+        if object_id in self.perimeter.get_objects():
+            return "[ERROR] Add Object: Object Exists"
+        else:
+            self.perimeter.get_objects().append(object_id)
+            return self.perimeter.get_objects()
+
+    def del_object(self, object_id):
+        if object_id in self.perimeter.get_objects():
+            self.perimeter.get_objects().remove(object_id)
+            return self.perimeter.get_objects()
+        else:
+            return "[ERROR] Del Object: Object Unknown"
+
+    # ---------------- assignment api ----------------
+
+    def get_subject_assignments(self, category_id):
+        if category_id in self.metadata.get_subject_categories():
+            return self.assignment.get_subject_category_assignments()[category_id]
+        else:
+            return "[ERROR] Get Subject Assignment: Subject Category Unknown"
+
+    def add_subject_assignment(self, category_id, subject_id, category_value):
+        if category_id in self.metadata.get_subject_categories():
+            if subject_id in self.perimeter.get_subjects():
+                if category_value in self.configuration.get_subject_category_values()[category_id]:
+                    if category_id in self.assignment.get_subject_category_assignments().keys():
+                        if subject_id in self.assignment.get_subject_category_assignments()[category_id].keys():
+                            if category_value in self.assignment.get_subject_category_assignments()[category_id][subject_id]:
+                                return "[ERROR] Add Subject Assignment: Subject Assignment Exists"
+                            else:
+                                self.assignment.get_subject_category_assignments()[category_id][subject_id].extend([category_value])
+                        else:
+                            self.assignment.get_subject_category_assignments()[category_id][subject_id] = [category_value]
+                    else:
+                        self.assignment.get_subject_category_assignments()[category_id] = {subject_id: [category_value]}
+                    return self.assignment.get_subject_category_assignments()
+                else:
+                    return "[ERROR] Add Subject Assignment: Subject Category Value Unknown"
+            else:
+                return "[ERROR] Add Subject Assignment: Subject Unknown"
+        else:
+            return "[ERROR] Add Subject Assignment: Subject Category Unknown"
+
+    def del_subject_assignment(self, category_id, subject_id, category_value):
+        if category_id in self.metadata.get_subject_categories():
+            if subject_id in self.perimeter.get_subjects():
+                if category_value in self.configuration.get_subject_category_values()[category_id]:
+                    if len(self.assignment.get_subject_category_assignments()[category_id][subject_id]) >= 2:
+                        self.assignment.get_subject_category_assignments()[category_id][subject_id].remove(category_value)
+                    else:
+                        self.assignment.get_subject_category_assignments()[category_id].pop(subject_id)
+                    return self.assignment.get_subject_category_assignments()
+                else:
+                    return "[ERROR] Del Subject Assignment: Assignment Unknown"
+            else:
+                return "[ERROR] Del Subject Assignment: Subject Unknown"
+        else:
+            return "[ERROR] Del Subject Assignment: Subject Category Unknown"
+
+    def get_object_assignments(self, category_id):
+        if category_id in self.metadata.get_object_categories():
+            return self.assignment.get_object_category_assignments()[category_id]
+        else:
+            return "[ERROR] Get Object Assignment: Object Category Unknown"
+
+    def add_object_assignment(self, category_id, object_id, category_value):
+        if category_id in self.metadata.get_object_categories():
+            if object_id in self.perimeter.get_objects():
+                if category_value in self.configuration.get_object_category_values()[category_id]:
+                    if category_id in self.assignment.get_object_category_assignments().keys():
+                        if object_id in self.assignment.get_object_category_assignments()[category_id].keys():
+                            if category_value in self.assignment.get_object_category_assignments()[category_id][object_id]:
+                                return "[ERROR] Add Object Assignment: Object Assignment Exists"
+                            else:
+                                self.assignment.get_object_category_assignments()[category_id][object_id].extend([category_value])
+                        else:
+                            self.assignment.get_object_category_assignments()[category_id][object_id] = [category_value]
+                    else:
+                        self.assignment.get_object_category_assignments()[category_id] = {object_id: [category_value]}
+                    return self.assignment.get_object_category_assignments()
+                else:
+                    return "[ERROR] Add Object Assignment: Object Category Value Unknown"
+            else:
+                return "[ERROR] Add Object Assignment: Object Unknown"
+        else:
+            return "[ERROR] Add Object Assignment: Object Category Unknown"
+
+    def del_object_assignment(self, category_id, object_id, category_value):
+        if category_id in self.metadata.get_object_categories():
+            if object_id in self.perimeter.get_objects():
+                if category_value in self.configuration.get_object_category_values()[category_id]:
+                    if len(self.assignment.get_object_category_assignments()[category_id][object_id]) >= 2:
+                        self.assignment.get_object_category_assignments()[category_id][object_id].remove(category_value)
+                    else:
+                        self.assignment.get_object_category_assignments()[category_id].pop(object_id)
+                    return self.assignment.get_object_category_assignments()
+                else:
+                    return "[ERROR] Del Object Assignment: Assignment Unknown"
+            else:
+                return "[ERROR] Del Object Assignment: Object Unknown"
+        else:
+            return "[ERROR] Del Object Assignment: Object Category Unknown"
+
+    # ---------------- inter-extension API ----------------
+
+    def create_requesting_collaboration(self, sub_list, vent_uuid, act):
+        _sub_cat_values = dict()
+        _obj_cat_values = dict()
+
+        if type(self.add_object(vent_uuid)) is not list:
+            return "[Error] Create Requesting Collaboration: No Success"
+        for _relation in self.get_meta_rule()["sub_meta_rules"]:
+            for _sub_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
+                _sub_cat_value = str(uuid4())
+                if type(self.add_subject_category_value(_sub_cat_id, _sub_cat_value)) is not list:
+                    return "[Error] Create Requesting Collaboration: No Success"
+                # merge with any values already collected for this relation
+                _sub_cat_values.setdefault(_relation, dict())[_sub_cat_id] = _sub_cat_value
+                for _sub in sub_list:
+                    if type(self.add_subject_assignment(_sub_cat_id, _sub, _sub_cat_value)) is not dict:
+                        return "[Error] Create Requesting Collaboration: No Success"
+
+            for _obj_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
+                if _obj_cat_id == 'action':
+                    _obj_cat_values.setdefault(_relation, dict())[_obj_cat_id] = act
+                else:
+                    _obj_cat_value = str(uuid4())
+                    if type(self.add_object_category_value(_obj_cat_id, _obj_cat_value)) is not list:
+                        return "[Error] Create Requesting Collaboration: No Success"
+                    if type(self.add_object_assignment(_obj_cat_id, vent_uuid, _obj_cat_value)) is not dict:
+                        return "[Error] Create Requesting Collaboration: No Success"
+                    _obj_cat_values.setdefault(_relation, dict())[_obj_cat_id] = _obj_cat_value
+
+        _rule = self.add_rule(_sub_cat_values, _obj_cat_values)
+        if type(_rule) is not dict:
+            return "[Error] Create Requesting Collaboration: No Success"
+        return {"subject_category_value_dict": _sub_cat_values, "object_category_value_dict": _obj_cat_values,
+                    "rule": _rule}
+
+    def destroy_requesting_collaboration(self, sub_list, vent_uuid, sub_cat_value_dict, obj_cat_value_dict):
+        for _relation in self.get_meta_rule()["sub_meta_rules"]:
+            for _sub_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
+                for _sub in sub_list:
+                    if type(self.del_subject_assignment(_sub_cat_id, _sub, sub_cat_value_dict[_relation][_sub_cat_id]))\
+                            is not dict:
+                        return "[Error] Destroy Requesting Collaboration: No Success"
+                if type(self.del_subject_category_value(_sub_cat_id, sub_cat_value_dict[_relation][_sub_cat_id])) \
+                        is not list:
+                    return "[Error] Destroy Requesting Collaboration: No Success"
+
+            for _obj_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
+                if _obj_cat_id == "action":
+                    pass  # TODO: reconsider treating the action as an object attribute
+                else:
+                    if type(self.del_object_assignment(_obj_cat_id, vent_uuid, obj_cat_value_dict[_relation][_obj_cat_id])) is not dict:
+                        return "[Error] Destroy Requesting Collaboration: No Success"
+                    if type(self.del_object_category_value(_obj_cat_id, obj_cat_value_dict[_relation][_obj_cat_id])) is not list:
+                        return "[Error] Destroy Requesting Collaboration: No Success"
+
+        if type(self.del_rule(sub_cat_value_dict, obj_cat_value_dict)) is not dict:
+            return "[Error] Destroy Requesting Collaboration: No Success"
+        if type(self.del_object(vent_uuid)) is not list:
+            return "[Error] Destroy Requesting Collaboration: No Success"
+        return "[Destroy Requesting Collaboration] OK"
+
+    def create_requested_collaboration(self, vent_uuid, obj_list, act):
+        _sub_cat_values = dict()
+        _obj_cat_values = dict()
+
+        if type(self.add_subject(vent_uuid)) is not list:
+            return "[Error] Create Requested Collaboration: No Success"
+
+        for _relation in self.get_meta_rule()["sub_meta_rules"]:
+            for _sub_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
+                _sub_cat_value = str(uuid4())
+                if type(self.add_subject_category_value(_sub_cat_id, _sub_cat_value)) is not list:
+                    return "[Error] Create Requested Collaboration: No Success"
+                _sub_cat_values.setdefault(_relation, dict())[_sub_cat_id] = _sub_cat_value
+                if type(self.add_subject_assignment(_sub_cat_id, vent_uuid, _sub_cat_value)) is not dict:
+                    return "[Error] Create Requested Collaboration: No Success"
+
+            for _obj_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
+                if _obj_cat_id == 'action':
+                    _obj_cat_values.setdefault(_relation, dict())[_obj_cat_id] = act
+                else:
+                    _obj_cat_value = str(uuid4())
+                    if type(self.add_object_category_value(_obj_cat_id, _obj_cat_value)) is not list:
+                        return "[Error] Create Requested Collaboration: No Success"
+                    _obj_cat_values.setdefault(_relation, dict())[_obj_cat_id] = _obj_cat_value
+                    for _obj in obj_list:
+                        if type(self.add_object_assignment(_obj_cat_id, _obj, _obj_cat_value)) is not dict:
+                            return "[Error] Create Requested Collaboration: No Success"
+
+        _rule = self.add_rule(_sub_cat_values, _obj_cat_values)
+        if type(_rule) is not dict:
+            return "[Error] Create Requested Collaboration: No Success"
+        return {"subject_category_value_dict": _sub_cat_values, "object_category_value_dict": _obj_cat_values,
+                "rule": _rule}
+
+    def destroy_requested_collaboration(self, vent_uuid, obj_list, sub_cat_value_dict, obj_cat_value_dict):
+        for _relation in self.get_meta_rule()["sub_meta_rules"]:
+            for _sub_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["subject_categories"]:
+                if type(self.del_subject_assignment(_sub_cat_id, vent_uuid, sub_cat_value_dict[_relation][_sub_cat_id])) is not dict:
+                    return "[Error] Destroy Requested Collaboration: No Success"
+                if type(self.del_subject_category_value(_sub_cat_id, sub_cat_value_dict[_relation][_sub_cat_id])) is not list:
+                    return "[Error] Destroy Requested Collaboration: No Success"
+
+            for _obj_cat_id in self.get_meta_rule()["sub_meta_rules"][_relation]["object_categories"]:
+                if _obj_cat_id == "action":
+                    pass  # TODO: reconsider treating the action as an object attribute
+                else:
+                    for _obj in obj_list:
+                        if type(self.del_object_assignment(_obj_cat_id, _obj, obj_cat_value_dict[_relation][_obj_cat_id])) is not dict:
+                            return "[Error] Destroy Requested Collaboration: No Success"
+                    if type(self.del_object_category_value(_obj_cat_id, obj_cat_value_dict[_relation][_obj_cat_id])) is not list:
+                        return "[Error] Destroy Requested Collaboration: No Success"
+
+        if type(self.del_rule(sub_cat_value_dict, obj_cat_value_dict)) is not dict:
+            return "[Error] Destroy Requested Collaboration: No Success"
+        if type(self.del_subject(vent_uuid)) is not list:
+            return "[Error] Destroy Requested Collaboration: No Success"
+        return "[Destroy Requested Collaboration] OK"
+
+    # ---------------- sync_db api ----------------
+
+    def get_data(self):
+        data = dict()
+        data["metadata"] = self.metadata.get_data()
+        data["configuration"] = self.configuration.get_data()
+        data["perimeter"] = self.perimeter.get_data()
+        data["assignment"] = self.assignment.get_data()
+        return data
+
+    def set_data(self, extension_data):
+        self.metadata.set_data(extension_data["metadata"])
+        self.configuration.set_data(extension_data["configuration"])
+        self.perimeter.set_data(extension_data["perimeter"])
+        self.assignment.set_data(extension_data["assignment"])
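+
+    # get_data()/set_data() are the (de)serialization hooks used when syncing
+    # an extension with the database backend; a round trip is essentially:
+    #
+    #     snapshot = ext.get_data()   # plain dicts/lists, JSON-serializable
+    #     restored = Extension()
+    #     restored.set_data(snapshot)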
diff --git a/keystone-moon/keystone/contrib/moon/migrate_repo/__init__.py b/keystone-moon/keystone/contrib/moon/migrate_repo/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/moon/migrate_repo/migrate.cfg b/keystone-moon/keystone/contrib/moon/migrate_repo/migrate.cfg
new file mode 100644 (file)
index 0000000..7a7bd1f
--- /dev/null
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=moon
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/contrib/moon/migrate_repo/versions/001_moon.py b/keystone-moon/keystone/contrib/moon/migrate_repo/versions/001_moon.py
new file mode 100644 (file)
index 0000000..a49ca20
--- /dev/null
@@ -0,0 +1,194 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import sqlalchemy as sql
+from keystone.common import sql as k_sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    intra_extension_table = sql.Table(
+        'intra_extension',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('name', sql.String(64), nullable=False),
+        sql.Column('model', sql.String(64), nullable=True),
+        sql.Column('description', sql.Text(), nullable=True),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    intra_extension_table.create(migrate_engine, checkfirst=True)
+
+    subjects_table = sql.Table(
+        'subject',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('subjects', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    subjects_table.create(migrate_engine, checkfirst=True)
+
+    objects_table = sql.Table(
+        'object',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('objects', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    objects_table.create(migrate_engine, checkfirst=True)
+
+    actions_table = sql.Table(
+        'action',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('actions', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    actions_table.create(migrate_engine, checkfirst=True)
+
+    subject_categories_table = sql.Table(
+        'subject_category',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('subject_categories', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    subject_categories_table.create(migrate_engine, checkfirst=True)
+
+    object_categories_table = sql.Table(
+        'object_category',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('object_categories', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    object_categories_table.create(migrate_engine, checkfirst=True)
+
+    action_categories_table = sql.Table(
+        'action_category',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('action_categories', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    action_categories_table.create(migrate_engine, checkfirst=True)
+
+    subject_category_values_table = sql.Table(
+        'subject_category_scope',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('subject_category_scope', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    subject_category_values_table.create(migrate_engine, checkfirst=True)
+
+    object_category_values_table = sql.Table(
+        'object_category_scope',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('object_category_scope', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    object_category_values_table.create(migrate_engine, checkfirst=True)
+
+    action_category_values_table = sql.Table(
+        'action_category_scope',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('action_category_scope', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    action_category_values_table.create(migrate_engine, checkfirst=True)
+
+    subject_category_assignments_table = sql.Table(
+        'subject_category_assignment',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('subject_category_assignments', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    subject_category_assignments_table.create(migrate_engine, checkfirst=True)
+
+    object_category_assignments_table = sql.Table(
+        'object_category_assignment',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('object_category_assignments', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    object_category_assignments_table.create(migrate_engine, checkfirst=True)
+
+    action_category_assignments_table = sql.Table(
+        'action_category_assignment',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('action_category_assignments', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    action_category_assignments_table.create(migrate_engine, checkfirst=True)
+
+    meta_rule_table = sql.Table(
+        'metarule',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('sub_meta_rules', k_sql.JsonBlob(), nullable=True),
+        sql.Column('aggregation', sql.Text(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    meta_rule_table.create(migrate_engine, checkfirst=True)
+
+    rule_table = sql.Table(
+        'rule',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('rules', k_sql.JsonBlob(), nullable=True),
+        sql.Column('intra_extension_uuid', sql.ForeignKey("intra_extension.id"), nullable=False),
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    rule_table.create(migrate_engine, checkfirst=True)
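+
+# Note: applying this migration is normally done through Keystone's stock
+# extension-sync tooling, e.g. something like
+#     keystone-manage db_sync --extension moon
+# (command shown as an assumption, based on how other Keystone extensions
+# are synced, rather than anything defined in this file).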
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    for _table in (
+        'subject',
+        'object',
+        'action',
+        'subject_category',
+        'object_category',
+        'action_category',
+        'subject_category_scope',
+        'object_category_scope',
+        'action_category_scope',
+        'subject_category_assignment',
+        'object_category_assignment',
+        'action_category_assignment',
+        'metarule',
+        'rule',
+        'intra_extension',
+    ):
+        try:
+            table = sql.Table(_table, meta, autoload=True)
+            table.drop(migrate_engine, checkfirst=True)
+        except Exception as e:
+            print(e)
+
+
diff --git a/keystone-moon/keystone/contrib/moon/migrate_repo/versions/002_moon.py b/keystone-moon/keystone/contrib/moon/migrate_repo/versions/002_moon.py
new file mode 100644 (file)
index 0000000..a0f9095
--- /dev/null
@@ -0,0 +1,34 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import sqlalchemy as sql
+from keystone.common import sql as k_sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    inter_extension_table = sql.Table(
+        'inter_extension',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('requesting_intra_extension_uuid', sql.String(64), nullable=False),
+        sql.Column('requested_intra_extension_uuid', sql.String(64), nullable=False),
+        sql.Column('virtual_entity_uuid', sql.String(64), nullable=False),
+        sql.Column('genre', sql.String(64), nullable=False),
+        sql.Column('description', sql.Text(), nullable=True),
+
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    inter_extension_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    table = sql.Table('inter_extension', meta, autoload=True)
+    table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/contrib/moon/migrate_repo/versions/003_moon.py b/keystone-moon/keystone/contrib/moon/migrate_repo/versions/003_moon.py
new file mode 100644 (file)
index 0000000..0693275
--- /dev/null
@@ -0,0 +1,32 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import sqlalchemy as sql
+from keystone.common import sql as k_sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    tenant_table = sql.Table(
+        'tenants',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('name', sql.String(128), nullable=True),
+        sql.Column('authz', sql.String(64), nullable=True),
+        sql.Column('admin', sql.String(64), nullable=True),
+
+        mysql_engine='InnoDB',
+        mysql_charset='utf8')
+    tenant_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    table = sql.Table('tenants', meta, autoload=True)
+    table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/contrib/moon/routers.py b/keystone-moon/keystone/contrib/moon/routers.py
new file mode 100644 (file)
index 0000000..e1eb113
--- /dev/null
@@ -0,0 +1,443 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+"""WSGI Routers for the Moon service."""
+
+from keystone.contrib.moon import controllers
+from keystone.common import wsgi
+
+
+class Routers(wsgi.RoutersBase):
+    """API Endpoints for the Moon extension.
+    """
+
+    PATH_PREFIX = '/OS-MOON'
+
+    @staticmethod
+    def _get_rel(component):
+        return 'http://docs.openstack.org/api/openstack-authz/3/param/{}'.format(component)
+
+    @staticmethod
+    def _get_path(component):
+        return 'http://docs.openstack.org/api/openstack-authz/3/param/{}'.format(component)
+
+    def append_v3_routers(self, mapper, routers):
+        # Controllers creation
+        authz_controller = controllers.Authz_v3()
+        intra_ext_controller = controllers.IntraExtensions()
+        authz_policies_controller = controllers.AuthzPolicies()
+        tenants_controller = controllers.Tenants()
+        logs_controller = controllers.Logs()
+        inter_ext_controller = controllers.InterExtensions()
+
+        # Authz route
+        self._add_resource(
+            mapper, authz_controller,
+            path=self.PATH_PREFIX+'/authz/{tenant_id}/{subject_id}/{object_id}/{action_id}',
+            get_action='get_authz',
+            rel=self._get_rel('authz'),
+            path_vars={
+                'tenant_id': self._get_path('tenants'),
+                'subject_id': self._get_path('subjects'),
+                'object_id': self._get_path('objects'),
+                'action_id': self._get_path('actions'),
+            })
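+        # For illustration, the route above exposes the authorization
+        # decision as a GET on
+        #     /v3/OS-MOON/authz/{tenant_id}/{subject_id}/{object_id}/{action_id}
+        # e.g. with curl, assuming a scoped token in $TOKEN and a Keystone
+        # endpoint on localhost (IDs are hypothetical):
+        #     curl -H "X-Auth-Token: $TOKEN" \
+        #          http://localhost:5000/v3/OS-MOON/authz/tenant1/user1/vm1/read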
+
+        # IntraExtensions route
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions',
+            get_action='get_intra_extensions',
+            post_action='create_intra_extension',
+            rel=self._get_rel('intra_extensions'),
+            path_vars={})
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}',
+            get_action='get_intra_extension',
+            delete_action='delete_intra_extension',
+            rel=self._get_rel('intra_extensions'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        self._add_resource(
+            mapper, authz_policies_controller,
+            path=self.PATH_PREFIX+'/authz_policies',
+            get_action='get_authz_policies',
+            rel=self._get_rel('authz_policies'),
+            path_vars={})
+
+        # Perimeter route
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subjects',
+            get_action='get_subjects',
+            post_action='add_subject',
+            rel=self._get_rel('subjects'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subjects/{subject_id}',
+            delete_action='del_subject',
+            rel=self._get_rel('subjects'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/objects',
+            get_action='get_objects',
+            post_action='add_object',
+            rel=self._get_rel('objects'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/objects/{object_id}',
+            delete_action='del_object',
+            rel=self._get_rel('objects'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/actions',
+            get_action='get_actions',
+            post_action='add_action',
+            rel=self._get_rel('actions'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/actions/{action_id}',
+            delete_action='del_action',
+            rel=self._get_rel('actions'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        # Metadata route
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subject_categories',
+            get_action='get_subject_categories',
+            post_action='add_subject_category',
+            rel=self._get_rel('subject_categories'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subject_categories/{subject_category_id}',
+            delete_action='del_subject_category',
+            rel=self._get_rel('subject_categories'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/object_categories',
+            get_action='get_object_categories',
+            post_action='add_object_category',
+            rel=self._get_rel('object_categories'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/object_categories/{object_category_id}',
+            delete_action='del_object_category',
+            rel=self._get_rel('object_categories'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/action_categories',
+            get_action='get_action_categories',
+            post_action='add_action_category',
+            rel=self._get_rel('action_categories'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/action_categories/{action_category_id}',
+            delete_action='del_action_category',
+            rel=self._get_rel('action_categories'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        # Scope route
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subject_category_scope',
+            post_action='add_subject_category_scope',
+            rel=self._get_rel('subject_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subject_category_scope/{subject_category_id}',
+            get_action='get_subject_category_scope',
+            rel=self._get_rel('subject_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subject_category_scope/{subject_category_id}/{subject_category_scope_id}',
+            delete_action='del_subject_category_scope',
+            rel=self._get_rel('subject_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/object_category_scope',
+            post_action='add_object_category_scope',
+            rel=self._get_rel('object_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/object_category_scope/{object_category_id}',
+            get_action='get_object_category_scope',
+            rel=self._get_rel('object_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/object_category_scope/{object_category_id}/{object_category_scope_id}',
+            delete_action='del_object_category_scope',
+            rel=self._get_rel('object_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/action_category_scope',
+            post_action='add_action_category_scope',
+            rel=self._get_rel('action_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/action_category_scope/{action_category_id}',
+            get_action='get_action_category_scope',
+            rel=self._get_rel('action_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/action_category_scope/{action_category_id}/{action_category_scope_id}',
+            delete_action='del_action_category_scope',
+            rel=self._get_rel('action_category_scope'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        # Assignment route
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subject_assignments/{subject_id}',
+            get_action='get_subject_assignments',
+            rel=self._get_rel('subject_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/subject_assignments',
+            post_action='add_subject_assignment',
+            rel=self._get_rel('subject_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/'
+                                  'subject_assignments/{subject_id}/{subject_category}/{subject_category_scope}',
+            delete_action='del_subject_assignment',
+            rel=self._get_rel('subject_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/object_assignments/{object_id}',
+            get_action='get_object_assignments',
+            rel=self._get_rel('object_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/object_assignments',
+            post_action='add_object_assignment',
+            rel=self._get_rel('object_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/'
+                                  'object_assignments/{object_id}/{object_category}/{object_category_scope}',
+            delete_action='del_object_assignment',
+            rel=self._get_rel('object_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/action_assignments/{action_id}',
+            get_action='get_action_assignments',
+            rel=self._get_rel('action_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/action_assignments',
+            post_action='add_action_assignment',
+            rel=self._get_rel('action_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/'
+                                  'action_assignments/{action_id}/{action_category}/{action_category_scope}',
+            delete_action='del_action_assignment',
+            rel=self._get_rel('action_assignments'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        # Metarule route
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/aggregation_algorithms',
+            get_action='get_aggregation_algorithms',
+            rel=self._get_rel('aggregation_algorithms'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/aggregation_algorithm',
+            get_action='get_aggregation_algorithm',
+            post_action='set_aggregation_algorithm',
+            rel=self._get_rel('aggregation_algorithms'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/sub_meta_rule',
+            get_action='get_sub_meta_rule',
+            post_action='set_sub_meta_rule',
+            rel=self._get_rel('sub_meta_rule'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/sub_meta_rule_relations',
+            get_action='get_sub_meta_rule_relations',
+            rel=self._get_rel('sub_meta_rule_relations'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        # Rules route
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/sub_rules',
+            get_action='get_sub_rules',
+            post_action='set_sub_rule',
+            rel=self._get_rel('sub_rules'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+        self._add_resource(
+            mapper, intra_ext_controller,
+            path=self.PATH_PREFIX+'/intra_extensions/{intra_extensions_id}/sub_rules/{relation_name}/{rule}',
+            delete_action='del_sub_rule',
+            rel=self._get_rel('sub_rules'),
+            path_vars={
+                'intra_extensions_id': self._get_path('intra_extensions'),
+            })
+
+        # Tenants route
+        self._add_resource(
+            mapper, tenants_controller,
+            path=self.PATH_PREFIX+'/tenants',
+            get_action='get_tenants',
+            rel=self._get_rel('tenants'),
+            path_vars={})
+        self._add_resource(
+            mapper, tenants_controller,
+            path=self.PATH_PREFIX+'/tenant',
+            post_action='set_tenant',
+            rel=self._get_rel('tenants'),
+            path_vars={})
+        self._add_resource(
+            mapper, tenants_controller,
+            path=self.PATH_PREFIX+'/tenant/{tenant_uuid}',
+            get_action='get_tenant',
+            delete_action='delete_tenant',
+            rel=self._get_rel('tenants'),
+            path_vars={
+                'tenant_uuid': self._get_path('tenants'),
+            })
+
+        # Logs route
+        self._add_resource(
+            mapper, logs_controller,
+            path=self.PATH_PREFIX+'/logs',
+            get_action='get_logs',
+            rel=self._get_rel('logs'),
+            path_vars={
+            })
+        self._add_resource(
+            mapper, logs_controller,
+            path=self.PATH_PREFIX+'/logs/{options}',
+            get_action='get_logs',
+            rel=self._get_rel('logs'),
+            path_vars={
+            })
+
+        # InterExtensions route
+        # self._add_resource(
+        #     mapper, inter_ext_controller,
+        #     path=self.PATH_PREFIX+'/inter_extensions',
+        #     get_action='get_inter_extensions',
+        #     post_action='create_inter_extension',
+        #     rel=self._get_rel('inter_extensions'),
+        #     path_vars={})
+        # self._add_resource(
+        #     mapper, inter_ext_controller,
+        #     path=self.PATH_PREFIX+'/inter_extensions/{inter_extensions_id}',
+        #     get_action='get_inter_extension',
+        #     delete_action='delete_inter_extension',
+        #     rel=self._get_rel('inter_extensions'),
+        #     path_vars={
+        #         'inter_extensions_id': self._get_path('inter_extensions'),
+        #     })
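+
+        # Illustrative sketch (assumption: PATH_PREFIX is '/OS-MOON'): each
+        # _add_resource() call above binds an HTTP verb to a controller
+        # action, so the sub_rules resource, for example, yields:
+        #   GET    /OS-MOON/intra_extensions/{id}/sub_rules -> get_sub_rules
+        #   POST   /OS-MOON/intra_extensions/{id}/sub_rules -> set_sub_rule
+        #   DELETE /OS-MOON/intra_extensions/{id}/sub_rules/
+        #          {relation_name}/{rule}                   -> del_sub_rule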
diff --git a/keystone-moon/keystone/contrib/oauth1/__init__.py b/keystone-moon/keystone/contrib/oauth1/__init__.py
new file mode 100644 (file)
index 0000000..8cab249
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.oauth1.core import *  # noqa
diff --git a/keystone-moon/keystone/contrib/oauth1/backends/__init__.py b/keystone-moon/keystone/contrib/oauth1/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/oauth1/backends/sql.py b/keystone-moon/keystone/contrib/oauth1/backends/sql.py
new file mode 100644 (file)
index 0000000..c6ab6e5
--- /dev/null
@@ -0,0 +1,272 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import random as _random
+import uuid
+
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+import six
+
+from keystone.common import sql
+from keystone.contrib.oauth1 import core
+from keystone import exception
+from keystone.i18n import _
+
+
+random = _random.SystemRandom()
+
+
+class Consumer(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'consumer'
+    attributes = ['id', 'description', 'secret']
+    id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+    description = sql.Column(sql.String(64), nullable=True)
+    secret = sql.Column(sql.String(64), nullable=False)
+    extra = sql.Column(sql.JsonBlob(), nullable=False)
+
+
+class RequestToken(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'request_token'
+    attributes = ['id', 'request_secret',
+                  'verifier', 'authorizing_user_id', 'requested_project_id',
+                  'role_ids', 'consumer_id', 'expires_at']
+    id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+    request_secret = sql.Column(sql.String(64), nullable=False)
+    verifier = sql.Column(sql.String(64), nullable=True)
+    authorizing_user_id = sql.Column(sql.String(64), nullable=True)
+    requested_project_id = sql.Column(sql.String(64), nullable=False)
+    role_ids = sql.Column(sql.Text(), nullable=True)
+    consumer_id = sql.Column(sql.String(64), sql.ForeignKey('consumer.id'),
+                             nullable=False, index=True)
+    expires_at = sql.Column(sql.String(64), nullable=True)
+
+    @classmethod
+    def from_dict(cls, user_dict):
+        return cls(**user_dict)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
+
+
+class AccessToken(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'access_token'
+    attributes = ['id', 'access_secret', 'authorizing_user_id',
+                  'project_id', 'role_ids', 'consumer_id',
+                  'expires_at']
+    id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+    access_secret = sql.Column(sql.String(64), nullable=False)
+    authorizing_user_id = sql.Column(sql.String(64), nullable=False,
+                                     index=True)
+    project_id = sql.Column(sql.String(64), nullable=False)
+    role_ids = sql.Column(sql.Text(), nullable=False)
+    consumer_id = sql.Column(sql.String(64), sql.ForeignKey('consumer.id'),
+                             nullable=False)
+    expires_at = sql.Column(sql.String(64), nullable=True)
+
+    @classmethod
+    def from_dict(cls, user_dict):
+        return cls(**user_dict)
+
+    def to_dict(self):
+        return dict(six.iteritems(self))
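+
+    # Round-trip sketch (illustrative): these DictBase models convert to and
+    # from plain dicts, so for a ref holding exactly the declared attributes,
+    #   AccessToken.from_dict(ref).to_dict() == ref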
+
+
+class OAuth1(object):
+    def _get_consumer(self, session, consumer_id):
+        consumer_ref = session.query(Consumer).get(consumer_id)
+        if consumer_ref is None:
+            raise exception.NotFound(_('Consumer not found'))
+        return consumer_ref
+
+    def get_consumer_with_secret(self, consumer_id):
+        session = sql.get_session()
+        consumer_ref = self._get_consumer(session, consumer_id)
+        return consumer_ref.to_dict()
+
+    def get_consumer(self, consumer_id):
+        return core.filter_consumer(
+            self.get_consumer_with_secret(consumer_id))
+
+    def create_consumer(self, consumer):
+        consumer['secret'] = uuid.uuid4().hex
+        if not consumer.get('description'):
+            consumer['description'] = None
+        session = sql.get_session()
+        with session.begin():
+            consumer_ref = Consumer.from_dict(consumer)
+            session.add(consumer_ref)
+        return consumer_ref.to_dict()
+
+    def _delete_consumer(self, session, consumer_id):
+        consumer_ref = self._get_consumer(session, consumer_id)
+        session.delete(consumer_ref)
+
+    def _delete_request_tokens(self, session, consumer_id):
+        q = session.query(RequestToken)
+        req_tokens = q.filter_by(consumer_id=consumer_id)
+        req_tokens_list = {x.id for x in req_tokens}
+        for token_id in req_tokens_list:
+            token_ref = self._get_request_token(session, token_id)
+            session.delete(token_ref)
+
+    def _delete_access_tokens(self, session, consumer_id):
+        q = session.query(AccessToken)
+        acc_tokens = q.filter_by(consumer_id=consumer_id)
+        acc_tokens_list = {x.id for x in acc_tokens}
+        for token_id in acc_tokens_list:
+            token_ref = self._get_access_token(session, token_id)
+            session.delete(token_ref)
+
+    def delete_consumer(self, consumer_id):
+        session = sql.get_session()
+        with session.begin():
+            self._delete_request_tokens(session, consumer_id)
+            self._delete_access_tokens(session, consumer_id)
+            self._delete_consumer(session, consumer_id)
+
+    def list_consumers(self):
+        session = sql.get_session()
+        cons = session.query(Consumer)
+        return [core.filter_consumer(x.to_dict()) for x in cons]
+
+    def update_consumer(self, consumer_id, consumer):
+        session = sql.get_session()
+        with session.begin():
+            consumer_ref = self._get_consumer(session, consumer_id)
+            old_consumer_dict = consumer_ref.to_dict()
+            old_consumer_dict.update(consumer)
+            new_consumer = Consumer.from_dict(old_consumer_dict)
+            consumer_ref.description = new_consumer.description
+            consumer_ref.extra = new_consumer.extra
+        return core.filter_consumer(consumer_ref.to_dict())
+
+    def create_request_token(self, consumer_id, project_id, token_duration,
+                             request_token_id=None, request_token_secret=None):
+        if request_token_id is None:
+            request_token_id = uuid.uuid4().hex
+        if request_token_secret is None:
+            request_token_secret = uuid.uuid4().hex
+        expiry_date = None
+        if token_duration:
+            now = timeutils.utcnow()
+            future = now + datetime.timedelta(seconds=token_duration)
+            expiry_date = timeutils.isotime(future, subsecond=True)
+
+        ref = {}
+        ref['id'] = request_token_id
+        ref['request_secret'] = request_token_secret
+        ref['verifier'] = None
+        ref['authorizing_user_id'] = None
+        ref['requested_project_id'] = project_id
+        ref['role_ids'] = None
+        ref['consumer_id'] = consumer_id
+        ref['expires_at'] = expiry_date
+        session = sql.get_session()
+        with session.begin():
+            token_ref = RequestToken.from_dict(ref)
+            session.add(token_ref)
+        return token_ref.to_dict()
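+
+    # A freshly created request token ref looks roughly like this
+    # (illustrative values):
+    #   {'id': '3f2a...', 'request_secret': '9c1b...', 'verifier': None,
+    #    'authorizing_user_id': None, 'requested_project_id': '<project>',
+    #    'role_ids': None, 'consumer_id': '<consumer>',
+    #    'expires_at': '2015-06-30T18:47:29.000000Z'}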
+
+    def _get_request_token(self, session, request_token_id):
+        token_ref = session.query(RequestToken).get(request_token_id)
+        if token_ref is None:
+            raise exception.NotFound(_('Request token not found'))
+        return token_ref
+
+    def get_request_token(self, request_token_id):
+        session = sql.get_session()
+        token_ref = self._get_request_token(session, request_token_id)
+        return token_ref.to_dict()
+
+    def authorize_request_token(self, request_token_id, user_id,
+                                role_ids):
+        session = sql.get_session()
+        with session.begin():
+            token_ref = self._get_request_token(session, request_token_id)
+            token_dict = token_ref.to_dict()
+            token_dict['authorizing_user_id'] = user_id
+            token_dict['verifier'] = ''.join(random.sample(core.VERIFIER_CHARS,
+                                                           8))
+            token_dict['role_ids'] = jsonutils.dumps(role_ids)
+
+            new_token = RequestToken.from_dict(token_dict)
+            for attr in RequestToken.attributes:
+                if attr in ('authorizing_user_id', 'verifier', 'role_ids'):
+                    setattr(token_ref, attr, getattr(new_token, attr))
+
+        return token_ref.to_dict()
+
+    def create_access_token(self, request_token_id, token_duration,
+                            access_token_id=None, access_token_secret=None):
+        if access_token_id is None:
+            access_token_id = uuid.uuid4().hex
+        if access_token_secret is None:
+            access_token_secret = uuid.uuid4().hex
+        session = sql.get_session()
+        with session.begin():
+            req_token_ref = self._get_request_token(session, request_token_id)
+            token_dict = req_token_ref.to_dict()
+
+            expiry_date = None
+            if token_duration:
+                now = timeutils.utcnow()
+                future = now + datetime.timedelta(seconds=token_duration)
+                expiry_date = timeutils.isotime(future, subsecond=True)
+
+            # add Access Token
+            ref = {}
+            ref['id'] = access_token_id
+            ref['access_secret'] = access_token_secret
+            ref['authorizing_user_id'] = token_dict['authorizing_user_id']
+            ref['project_id'] = token_dict['requested_project_id']
+            ref['role_ids'] = token_dict['role_ids']
+            ref['consumer_id'] = token_dict['consumer_id']
+            ref['expires_at'] = expiry_date
+            token_ref = AccessToken.from_dict(ref)
+            session.add(token_ref)
+
+            # remove request token, it's been used
+            session.delete(req_token_ref)
+
+        return token_ref.to_dict()
+
+    def _get_access_token(self, session, access_token_id):
+        token_ref = session.query(AccessToken).get(access_token_id)
+        if token_ref is None:
+            raise exception.NotFound(_('Access token not found'))
+        return token_ref
+
+    def get_access_token(self, access_token_id):
+        session = sql.get_session()
+        token_ref = self._get_access_token(session, access_token_id)
+        return token_ref.to_dict()
+
+    def list_access_tokens(self, user_id):
+        session = sql.get_session()
+        q = session.query(AccessToken)
+        user_auths = q.filter_by(authorizing_user_id=user_id)
+        return [core.filter_token(x.to_dict()) for x in user_auths]
+
+    def delete_access_token(self, user_id, access_token_id):
+        session = sql.get_session()
+        with session.begin():
+            token_ref = self._get_access_token(session, access_token_id)
+            token_dict = token_ref.to_dict()
+            if token_dict['authorizing_user_id'] != user_id:
+                raise exception.Unauthorized(_('User IDs do not match'))
+
+            session.delete(token_ref)
diff --git a/keystone-moon/keystone/contrib/oauth1/controllers.py b/keystone-moon/keystone/contrib/oauth1/controllers.py
new file mode 100644 (file)
index 0000000..fb5d0bc
--- /dev/null
@@ -0,0 +1,417 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Extensions supporting OAuth1."""
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import wsgi
+from keystone.contrib.oauth1 import core as oauth1
+from keystone.contrib.oauth1 import validator
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import token_model
+from keystone import notifications
+
+
+CONF = cfg.CONF
+
+
+@notifications.internal(notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS,
+                        resource_id_arg_index=0)
+def _emit_user_oauth_consumer_token_invalidate(payload):
+    # This is a special-case notification that expects the payload to be a
+    # dict containing the user_id and the consumer_id, so that the token
+    # provider can invalidate any matching tokens in the token persistence
+    # backend when token persistence is enabled.
+    pass
+
+
+@dependency.requires('oauth_api', 'token_provider_api')
+class ConsumerCrudV3(controller.V3Controller):
+    collection_name = 'consumers'
+    member_name = 'consumer'
+
+    @classmethod
+    def base_url(cls, context, path=None):
+        """Construct a path and pass it to V3Controller.base_url method."""
+
+        # NOTE(stevemar): Overriding path to /OS-OAUTH1/consumers so that
+        # V3Controller.base_url handles setting the self link correctly.
+        path = '/OS-OAUTH1/' + cls.collection_name
+        return controller.V3Controller.base_url(context, path=path)
+
+    @controller.protected()
+    def create_consumer(self, context, consumer):
+        ref = self._assign_unique_id(self._normalize_dict(consumer))
+        initiator = notifications._get_request_audit_info(context)
+        consumer_ref = self.oauth_api.create_consumer(ref, initiator)
+        return ConsumerCrudV3.wrap_member(context, consumer_ref)
+
+    @controller.protected()
+    def update_consumer(self, context, consumer_id, consumer):
+        self._require_matching_id(consumer_id, consumer)
+        ref = self._normalize_dict(consumer)
+        self._validate_consumer_ref(ref)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.oauth_api.update_consumer(consumer_id, ref, initiator)
+        return ConsumerCrudV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def list_consumers(self, context):
+        ref = self.oauth_api.list_consumers()
+        return ConsumerCrudV3.wrap_collection(context, ref)
+
+    @controller.protected()
+    def get_consumer(self, context, consumer_id):
+        ref = self.oauth_api.get_consumer(consumer_id)
+        return ConsumerCrudV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_consumer(self, context, consumer_id):
+        user_token_ref = token_model.KeystoneToken(
+            token_id=context['token_id'],
+            token_data=self.token_provider_api.validate_token(
+                context['token_id']))
+        payload = {'user_id': user_token_ref.user_id,
+                   'consumer_id': consumer_id}
+        _emit_user_oauth_consumer_token_invalidate(payload)
+        initiator = notifications._get_request_audit_info(context)
+        self.oauth_api.delete_consumer(consumer_id, initiator)
+
+    def _validate_consumer_ref(self, consumer):
+        if 'secret' in consumer:
+            msg = _('Cannot change consumer secret')
+            raise exception.ValidationError(message=msg)
+
+
+@dependency.requires('oauth_api')
+class AccessTokenCrudV3(controller.V3Controller):
+    collection_name = 'access_tokens'
+    member_name = 'access_token'
+
+    @classmethod
+    def _add_self_referential_link(cls, context, ref):
+        # NOTE(lwolf): overriding method to add proper path to self link
+        ref.setdefault('links', {})
+        path = '/users/%(user_id)s/OS-OAUTH1/access_tokens' % {
+            'user_id': cls._get_user_id(ref)
+        }
+        ref['links']['self'] = cls.base_url(context, path) + '/' + ref['id']
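+        # e.g. (illustrative): for user 'u1' and token 't1' this produces a
+        # self link like <base_url>/users/u1/OS-OAUTH1/access_tokens/t1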
+
+    @controller.protected()
+    def get_access_token(self, context, user_id, access_token_id):
+        access_token = self.oauth_api.get_access_token(access_token_id)
+        if access_token['authorizing_user_id'] != user_id:
+            raise exception.NotFound()
+        access_token = self._format_token_entity(context, access_token)
+        return AccessTokenCrudV3.wrap_member(context, access_token)
+
+    @controller.protected()
+    def list_access_tokens(self, context, user_id):
+        auth_context = context.get('environment',
+                                   {}).get('KEYSTONE_AUTH_CONTEXT', {})
+        if auth_context.get('is_delegated_auth'):
+            raise exception.Forbidden(
+                _('Cannot list request tokens'
+                  ' with a token issued via delegation.'))
+        refs = self.oauth_api.list_access_tokens(user_id)
+        formatted_refs = ([self._format_token_entity(context, x)
+                           for x in refs])
+        return AccessTokenCrudV3.wrap_collection(context, formatted_refs)
+
+    @controller.protected()
+    def delete_access_token(self, context, user_id, access_token_id):
+        access_token = self.oauth_api.get_access_token(access_token_id)
+        consumer_id = access_token['consumer_id']
+        payload = {'user_id': user_id, 'consumer_id': consumer_id}
+        _emit_user_oauth_consumer_token_invalidate(payload)
+        initiator = notifications._get_request_audit_info(context)
+        return self.oauth_api.delete_access_token(
+            user_id, access_token_id, initiator)
+
+    @staticmethod
+    def _get_user_id(entity):
+        return entity.get('authorizing_user_id', '')
+
+    def _format_token_entity(self, context, entity):
+        formatted_entity = entity.copy()
+        access_token_id = formatted_entity['id']
+        user_id = self._get_user_id(formatted_entity)
+        if 'role_ids' in entity:
+            formatted_entity.pop('role_ids')
+        if 'access_secret' in entity:
+            formatted_entity.pop('access_secret')
+
+        url = ('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s'
+               '/roles' % {'user_id': user_id,
+                           'access_token_id': access_token_id})
+
+        formatted_entity.setdefault('links', {})
+        formatted_entity['links']['roles'] = (self.base_url(context, url))
+
+        return formatted_entity
+
+
+@dependency.requires('oauth_api', 'role_api')
+class AccessTokenRolesV3(controller.V3Controller):
+    collection_name = 'roles'
+    member_name = 'role'
+
+    @controller.protected()
+    def list_access_token_roles(self, context, user_id, access_token_id):
+        access_token = self.oauth_api.get_access_token(access_token_id)
+        if access_token['authorizing_user_id'] != user_id:
+            raise exception.NotFound()
+        authed_role_ids = access_token['role_ids']
+        authed_role_ids = jsonutils.loads(authed_role_ids)
+        refs = ([self._format_role_entity(x) for x in authed_role_ids])
+        return AccessTokenRolesV3.wrap_collection(context, refs)
+
+    @controller.protected()
+    def get_access_token_role(self, context, user_id,
+                              access_token_id, role_id):
+        access_token = self.oauth_api.get_access_token(access_token_id)
+        if access_token['authorizing_user_id'] != user_id:
+            raise exception.Unauthorized(_('User IDs do not match'))
+        authed_role_ids = access_token['role_ids']
+        authed_role_ids = jsonutils.loads(authed_role_ids)
+        for authed_role_id in authed_role_ids:
+            if authed_role_id == role_id:
+                role = self._format_role_entity(role_id)
+                return AccessTokenRolesV3.wrap_member(context, role)
+        raise exception.RoleNotFound(_('Could not find role'))
+
+    def _format_role_entity(self, role_id):
+        role = self.role_api.get_role(role_id)
+        formatted_entity = role.copy()
+        if 'description' in role:
+            formatted_entity.pop('description')
+        if 'enabled' in role:
+            formatted_entity.pop('enabled')
+        return formatted_entity
+
+
+@dependency.requires('assignment_api', 'oauth_api',
+                     'resource_api', 'token_provider_api')
+class OAuthControllerV3(controller.V3Controller):
+    collection_name = 'not_used'
+    member_name = 'not_used'
+
+    def create_request_token(self, context):
+        headers = context['headers']
+        oauth_headers = oauth1.get_oauth_headers(headers)
+        consumer_id = oauth_headers.get('oauth_consumer_key')
+        requested_project_id = headers.get('Requested-Project-Id')
+
+        if not consumer_id:
+            raise exception.ValidationError(
+                attribute='oauth_consumer_key', target='request')
+        if not requested_project_id:
+            raise exception.ValidationError(
+                attribute='requested_project_id', target='request')
+
+        # NOTE(stevemar): Ensure consumer and requested project exist
+        self.resource_api.get_project(requested_project_id)
+        self.oauth_api.get_consumer(consumer_id)
+
+        url = self.base_url(context, context['path'])
+
+        req_headers = {'Requested-Project-Id': requested_project_id}
+        req_headers.update(headers)
+        request_verifier = oauth1.RequestTokenEndpoint(
+            request_validator=validator.OAuthValidator(),
+            token_generator=oauth1.token_generator)
+        h, b, s = request_verifier.create_request_token_response(
+            url,
+            http_method='POST',
+            body=context['query_string'],
+            headers=req_headers)
+
+        if (not b) or int(s) > 399:
+            msg = _('Invalid signature')
+            raise exception.Unauthorized(message=msg)
+
+        request_token_duration = CONF.oauth1.request_token_duration
+        initiator = notifications._get_request_audit_info(context)
+        token_ref = self.oauth_api.create_request_token(consumer_id,
+                                                        requested_project_id,
+                                                        request_token_duration,
+                                                        initiator)
+
+        result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s'
+                  % {'key': token_ref['id'],
+                     'secret': token_ref['request_secret']})
+
+        if CONF.oauth1.request_token_duration:
+            expiry_bit = '&oauth_expires_at=%s' % token_ref['expires_at']
+            result += expiry_bit
+
+        headers = [('Content-Type', 'application/x-www-form-urlencoded')]
+        response = wsgi.render_response(result,
+                                        status=(201, 'Created'),
+                                        headers=headers)
+
+        return response
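+
+    # The rendered body is form-encoded, e.g. (illustrative values):
+    #   oauth_token=29971f...&oauth_token_secret=238eb8...
+    #   &oauth_expires_at=2015-06-30T18:47:29.000000Z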
+
+    def create_access_token(self, context):
+        headers = context['headers']
+        oauth_headers = oauth1.get_oauth_headers(headers)
+        consumer_id = oauth_headers.get('oauth_consumer_key')
+        request_token_id = oauth_headers.get('oauth_token')
+        oauth_verifier = oauth_headers.get('oauth_verifier')
+
+        if not consumer_id:
+            raise exception.ValidationError(
+                attribute='oauth_consumer_key', target='request')
+        if not request_token_id:
+            raise exception.ValidationError(
+                attribute='oauth_token', target='request')
+        if not oauth_verifier:
+            raise exception.ValidationError(
+                attribute='oauth_verifier', target='request')
+
+        req_token = self.oauth_api.get_request_token(
+            request_token_id)
+
+        expires_at = req_token['expires_at']
+        if expires_at:
+            now = timeutils.utcnow()
+            expires = timeutils.normalize_time(
+                timeutils.parse_isotime(expires_at))
+            if now > expires:
+                raise exception.Unauthorized(_('Request token is expired'))
+
+        url = self.base_url(context, context['path'])
+
+        access_verifier = oauth1.AccessTokenEndpoint(
+            request_validator=validator.OAuthValidator(),
+            token_generator=oauth1.token_generator)
+        h, b, s = access_verifier.create_access_token_response(
+            url,
+            http_method='POST',
+            body=context['query_string'],
+            headers=headers)
+        params = oauth1.extract_non_oauth_params(b)
+        if len(params) != 0:
+            msg = _('There should not be any non-oauth parameters')
+            raise exception.Unauthorized(message=msg)
+
+        if req_token['consumer_id'] != consumer_id:
+            msg = _('Provided consumer key does not match stored consumer key')
+            raise exception.Unauthorized(message=msg)
+
+        if req_token['verifier'] != oauth_verifier:
+            msg = _('Provided verifier does not match stored verifier')
+            raise exception.Unauthorized(message=msg)
+
+        if req_token['id'] != request_token_id:
+            msg = _('Provided request key does not match stored request key')
+            raise exception.Unauthorized(message=msg)
+
+        if not req_token.get('authorizing_user_id'):
+            msg = _('Request Token does not have an authorizing user id')
+            raise exception.Unauthorized(message=msg)
+
+        access_token_duration = CONF.oauth1.access_token_duration
+        initiator = notifications._get_request_audit_info(context)
+        token_ref = self.oauth_api.create_access_token(request_token_id,
+                                                       access_token_duration,
+                                                       initiator)
+
+        result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s'
+                  % {'key': token_ref['id'],
+                     'secret': token_ref['access_secret']})
+
+        if CONF.oauth1.access_token_duration:
+            expiry_bit = '&oauth_expires_at=%s' % (token_ref['expires_at'])
+            result += expiry_bit
+
+        headers = [('Content-Type', 'application/x-www-form-urlencoded')]
+        response = wsgi.render_response(result,
+                                        status=(201, 'Created'),
+                                        headers=headers)
+
+        return response
+
+    @controller.protected()
+    def authorize_request_token(self, context, request_token_id, roles):
+        """An authenticated user is going to authorize a request token.
+
+        As a security precaution, the requested roles must match those in
+        the request token. Because this flow is currently CLI-only, there is
+        no other easy way to make sure the user knows which roles are being
+        requested before authorizing.
+        """
+        auth_context = context.get('environment',
+                                   {}).get('KEYSTONE_AUTH_CONTEXT', {})
+        if auth_context.get('is_delegated_auth'):
+            raise exception.Forbidden(
+                _('Cannot authorize a request token'
+                  ' with a token issued via delegation.'))
+
+        req_token = self.oauth_api.get_request_token(request_token_id)
+
+        expires_at = req_token['expires_at']
+        if expires_at:
+            now = timeutils.utcnow()
+            expires = timeutils.normalize_time(
+                timeutils.parse_isotime(expires_at))
+            if now > expires:
+                raise exception.Unauthorized(_('Request token is expired'))
+
+        # put the roles in a set for easy comparison
+        authed_roles = {role['id'] for role in roles}
+
+        # verify the authorizing user has the roles
+        user_token = token_model.KeystoneToken(
+            token_id=context['token_id'],
+            token_data=self.token_provider_api.validate_token(
+                context['token_id']))
+        user_id = user_token.user_id
+        project_id = req_token['requested_project_id']
+        user_roles = self.assignment_api.get_roles_for_user_and_project(
+            user_id, project_id)
+        cred_set = set(user_roles)
+
+        if not cred_set.issuperset(authed_roles):
+            msg = _('Authorizing user does not have the required role')
+            raise exception.Unauthorized(message=msg)
+
+        # create list of just the id's for the backend
+        role_list = list(authed_roles)
+
+        # verify the user has the project too
+        req_project_id = req_token['requested_project_id']
+        user_projects = self.assignment_api.list_projects_for_user(user_id)
+        for user_project in user_projects:
+            if user_project['id'] == req_project_id:
+                break
+        else:
+            msg = _("User is not a member of the requested project")
+            raise exception.Unauthorized(message=msg)
+
+        # finally authorize the token
+        authed_token = self.oauth_api.authorize_request_token(
+            request_token_id, user_id, role_list)
+
+        to_return = {'token': {'oauth_verifier': authed_token['verifier']}}
+        return to_return
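+
+    # The verifier returned here is what the user hands back to the
+    # consumer, e.g. (illustrative): {'token': {'oauth_verifier': 'Wx4Kq7Rp'}}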
diff --git a/keystone-moon/keystone/contrib/oauth1/core.py b/keystone-moon/keystone/contrib/oauth1/core.py
new file mode 100644 (file)
index 0000000..eeb3e11
--- /dev/null
@@ -0,0 +1,361 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Extensions supporting OAuth1."""
+
+from __future__ import absolute_import
+
+import abc
+import string
+import uuid
+
+import oauthlib.common
+from oauthlib import oauth1
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _LE
+from keystone import notifications
+
+
+RequestValidator = oauth1.RequestValidator
+Client = oauth1.Client
+AccessTokenEndpoint = oauth1.AccessTokenEndpoint
+ResourceEndpoint = oauth1.ResourceEndpoint
+AuthorizationEndpoint = oauth1.AuthorizationEndpoint
+SIG_HMAC = oauth1.SIGNATURE_HMAC
+RequestTokenEndpoint = oauth1.RequestTokenEndpoint
+oRequest = oauthlib.common.Request
+# The characters used to generate verifiers are limited to alphanumerical
+# values for ease of manual entry. Commonly confused characters are omitted.
+VERIFIER_CHARS = string.ascii_letters + string.digits
+CONFUSED_CHARS = 'jiIl1oO0'
+VERIFIER_CHARS = ''.join(c for c in VERIFIER_CHARS if c not in CONFUSED_CHARS)
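+# e.g. (illustrative): a verifier sampled from these characters might look
+# like 'Wx4Kq7Rp'; the characters 'jiIl1oO0' can never appear in one.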
+
+
+class Token(object):
+    def __init__(self, key, secret):
+        self.key = key
+        self.secret = secret
+        self.verifier = None
+
+    def set_verifier(self, verifier):
+        self.verifier = verifier
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+def token_generator(*args, **kwargs):
+    return uuid.uuid4().hex
+
+
+EXTENSION_DATA = {
+    'name': 'OpenStack OAUTH1 API',
+    'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                 'OS-OAUTH1/v1.0',
+    'alias': 'OS-OAUTH1',
+    'updated': '2013-07-07T12:00:00-00:00',
+    'description': 'OpenStack OAuth 1.0a Delegated Auth Mechanism.',
+    'links': [
+        {
+            'rel': 'describedby',
+            # TODO(dolph): link needs to be revised after
+            #              bug 928059 merges
+            'type': 'text/html',
+            'href': 'https://github.com/openstack/identity-api',
+        }
+    ]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+
+
+def filter_consumer(consumer_ref):
+    """Filter out private items in a consumer dict.
+
+    'secret' is never returned.
+
+    :returns: consumer_ref
+
+    """
+    if consumer_ref:
+        consumer_ref = consumer_ref.copy()
+        consumer_ref.pop('secret', None)
+    return consumer_ref
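+
+# e.g. (illustrative):
+#   filter_consumer({'id': 'c1', 'secret': 's3cr3t'}) == {'id': 'c1'}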
+
+
+def filter_token(access_token_ref):
+    """Filter out private items in an access token dict.
+
+    'access_secret' is never returned.
+
+    :returns: access_token_ref
+
+    """
+    if access_token_ref:
+        access_token_ref = access_token_ref.copy()
+        access_token_ref.pop('access_secret', None)
+    return access_token_ref
+
+
+def get_oauth_headers(headers):
+    parameters = {}
+
+    # The incoming headers variable is the usual headers dict taken from the
+    # request context. In an OAuth-signed request the oauth parameters are
+    # carried in the header under the key 'Authorization'.
+
+    if headers and 'Authorization' in headers:
+        # A typical Authorization value looks like:
+        # 'OAuth realm="", oauth_body_hash="2jm%3D", oauth_nonce="14475435", ...'
+        # The 'OAuth ' prefix is trimmed so that the remaining parameters can
+        # be split into key/value pairs.
+
+        auth_header = headers['Authorization']
+        params = oauth1.rfc5849.utils.parse_authorization_header(auth_header)
+        parameters.update(dict(params))
+        return parameters
+    else:
+        msg = _LE('Cannot retrieve Authorization headers')
+        LOG.error(msg)
+        raise exception.OAuthHeadersMissingError()
+
+
+def extract_non_oauth_params(query_string):
+    params = oauthlib.common.extract_params(query_string)
+    return {k: v for k, v in params if not k.startswith('oauth_')}
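+
+# e.g. (illustrative):
+#   extract_non_oauth_params('oauth_token=abc&name=demo') == {'name': 'demo'}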
+
+
+@dependency.provider('oauth_api')
+class Manager(manager.Manager):
+    """Default pivot point for the OAuth1 backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+    _ACCESS_TOKEN = "OS-OAUTH1:access_token"
+    _REQUEST_TOKEN = "OS-OAUTH1:request_token"
+    _CONSUMER = "OS-OAUTH1:consumer"
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.oauth1.driver)
+
+    def create_consumer(self, consumer_ref, initiator=None):
+        ret = self.driver.create_consumer(consumer_ref)
+        notifications.Audit.created(self._CONSUMER, ret['id'], initiator)
+        return ret
+
+    def update_consumer(self, consumer_id, consumer_ref, initiator=None):
+        ret = self.driver.update_consumer(consumer_id, consumer_ref)
+        notifications.Audit.updated(self._CONSUMER, consumer_id, initiator)
+        return ret
+
+    def delete_consumer(self, consumer_id, initiator=None):
+        ret = self.driver.delete_consumer(consumer_id)
+        notifications.Audit.deleted(self._CONSUMER, consumer_id, initiator)
+        return ret
+
+    def create_access_token(self, request_id, access_token_duration,
+                            initiator=None):
+        ret = self.driver.create_access_token(request_id,
+                                              access_token_duration)
+        notifications.Audit.created(self._ACCESS_TOKEN, ret['id'], initiator)
+        return ret
+
+    def delete_access_token(self, user_id, access_token_id, initiator=None):
+        ret = self.driver.delete_access_token(user_id, access_token_id)
+        notifications.Audit.deleted(self._ACCESS_TOKEN, access_token_id,
+                                    initiator)
+        return ret
+
+    def create_request_token(self, consumer_id, requested_project,
+                             request_token_duration, initiator=None):
+        ret = self.driver.create_request_token(
+            consumer_id, requested_project, request_token_duration)
+        notifications.Audit.created(self._REQUEST_TOKEN, ret['id'],
+                                    initiator)
+        return ret
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    """Interface description for an OAuth1 driver."""
+
+    @abc.abstractmethod
+    def create_consumer(self, consumer_ref):
+        """Create consumer.
+
+        :param consumer_ref: consumer ref with consumer name
+        :type consumer_ref: dict
+        :returns: consumer_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_consumer(self, consumer_id, consumer_ref):
+        """Update consumer.
+
+        :param consumer_id: id of consumer to update
+        :type consumer_id: string
+        :param consumer_ref: new consumer ref with consumer name
+        :type consumer_ref: dict
+        :returns: consumer_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_consumers(self):
+        """List consumers.
+
+        :returns: list of consumers
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_consumer(self, consumer_id):
+        """Get consumer, returns the consumer id (key)
+        and description.
+
+        :param consumer_id: id of consumer to get
+        :type consumer_id: string
+        :returns: consumer_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_consumer_with_secret(self, consumer_id):
+        """Like get_consumer() but returned consumer_ref includes
+        the consumer secret.
+
+        Secrets should only be shared upon consumer creation; the
+        consumer secret is required to verify incoming OAuth requests.
+
+        :param consumer_id: id of consumer to get
+        :type consumer_id: string
+        :returns: consumer_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_consumer(self, consumer_id):
+        """Delete consumer.
+
+        :param consumer_id: id of consumer to delete
+        :type consumer_id: string
+        :returns: None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_access_tokens(self, user_id):
+        """List access tokens.
+
+        :param user_id: search for access tokens authorized by given user id
+        :type user_id: string
+        :returns: list of access tokens the user has authorized
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_access_token(self, user_id, access_token_id):
+        """Delete access token.
+
+        :param user_id: authorizing user id
+        :type user_id: string
+        :param access_token_id: access token to delete
+        :type access_token_id: string
+        :returns: None
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_request_token(self, consumer_id, requested_project,
+                             request_token_duration):
+        """Create request token.
+
+        :param consumer_id: the id of the consumer
+        :type consumer_id: string
+        :param requested_project: requested project id
+        :type requested_project: string
+        :param request_token_duration: duration of request token
+        :type request_token_duration: string
+        :returns: request_token_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_request_token(self, request_token_id):
+        """Get request token.
+
+        :param request_token_id: the id of the request token
+        :type request_token_id: string
+        :returns: request_token_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_access_token(self, access_token_id):
+        """Get access token.
+
+        :param access_token_id: the id of the access token
+        :type access_token_id: string
+        :returns: access_token_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def authorize_request_token(self, request_id, user_id, role_ids):
+        """Authorize request token.
+
+        :param request_id: the id of the request token to be authorized
+        :type request_id: string
+        :param user_id: the id of the authorizing user
+        :type user_id: string
+        :param role_ids: list of role ids to authorize
+        :type role_ids: list
+        :returns: verifier
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_access_token(self, request_id, access_token_duration):
+        """Create access token.
+
+        :param request_id: the id of the request token being exchanged
+            (the request token is deleted once the access token is created)
+        :type request_id: string
+        :param access_token_duration: duration of an access token
+        :type access_token_duration: string
+        :returns: access_token_ref
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/__init__.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/migrate.cfg b/keystone-moon/keystone/contrib/oauth1/migrate_repo/migrate.cfg
new file mode 100644 (file)
index 0000000..97ca781
--- /dev/null
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=oauth1
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
new file mode 100644 (file)
index 0000000..a4fbf15
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine; bind
+    # migrate_engine to your metadata
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    consumer_table = sql.Table(
+        'consumer',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+        sql.Column('description', sql.String(64), nullable=False),
+        sql.Column('secret', sql.String(64), nullable=False),
+        sql.Column('extra', sql.Text(), nullable=False))
+    consumer_table.create(migrate_engine, checkfirst=True)
+
+    request_token_table = sql.Table(
+        'request_token',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+        sql.Column('request_secret', sql.String(64), nullable=False),
+        sql.Column('verifier', sql.String(64), nullable=True),
+        sql.Column('authorizing_user_id', sql.String(64), nullable=True),
+        sql.Column('requested_project_id', sql.String(64), nullable=False),
+        sql.Column('requested_roles', sql.Text(), nullable=False),
+        sql.Column('consumer_id', sql.String(64), nullable=False, index=True),
+        sql.Column('expires_at', sql.String(64), nullable=True))
+    request_token_table.create(migrate_engine, checkfirst=True)
+
+    access_token_table = sql.Table(
+        'access_token',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+        sql.Column('access_secret', sql.String(64), nullable=False),
+        sql.Column('authorizing_user_id', sql.String(64),
+                   nullable=False, index=True),
+        sql.Column('project_id', sql.String(64), nullable=False),
+        sql.Column('requested_roles', sql.Text(), nullable=False),
+        sql.Column('consumer_id', sql.String(64), nullable=False),
+        sql.Column('expires_at', sql.String(64), nullable=True))
+    access_token_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    # Operations to reverse the above upgrade go here.
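+    # At this schema version nothing references these tables yet (the
+    # foreign keys are only added in migration 002), so dropping them in
+    # this order is safe.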
+    tables = ['consumer', 'request_token', 'access_token']
+    for table_name in tables:
+        table = sql.Table(table_name, meta, autoload=True)
+        table.drop()
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
new file mode 100644 (file)
index 0000000..d39df8d
--- /dev/null
@@ -0,0 +1,54 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine; bind
+    # migrate_engine to your metadata
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    consumer_table = sql.Table('consumer', meta, autoload=True)
+    request_token_table = sql.Table('request_token', meta, autoload=True)
+    access_token_table = sql.Table('access_token', meta, autoload=True)
+
+    constraints = [{'table': request_token_table,
+                    'fk_column': 'consumer_id',
+                    'ref_column': consumer_table.c.id},
+                   {'table': access_token_table,
+                    'fk_column': 'consumer_id',
+                    'ref_column': consumer_table.c.id}]
+    # NOTE: sqlite cannot add foreign-key constraints to existing tables,
+    # so skip them there (this mirrors the engine check in downgrade()).
+    if migrate_engine.name != 'sqlite':
+        migration_helpers.add_constraints(constraints)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    consumer_table = sql.Table('consumer', meta, autoload=True)
+    request_token_table = sql.Table('request_token', meta, autoload=True)
+    access_token_table = sql.Table('access_token', meta, autoload=True)
+
+    constraints = [{'table': request_token_table,
+                    'fk_column': 'consumer_id',
+                    'ref_column': consumer_table.c.id},
+                   {'table': access_token_table,
+                    'fk_column': 'consumer_id',
+                    'ref_column': consumer_table.c.id}]
+    if migrate_engine.name != 'sqlite':
+        migration_helpers.remove_constraints(constraints)
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
new file mode 100644 (file)
index 0000000..e1cf884
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    user_table = sql.Table('consumer', meta, autoload=True)
+    user_table.c.description.alter(nullable=True)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    user_table = sql.Table('consumer', meta, autoload=True)
+    user_table.c.description.alter(nullable=False)
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
new file mode 100644 (file)
index 0000000..6f1e2e8
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    request_token_table = sql.Table('request_token', meta, autoload=True)
+    request_token_table.c.requested_roles.alter(nullable=True)
+    request_token_table.c.requested_roles.alter(name="role_ids")
+    access_token_table = sql.Table('access_token', meta, autoload=True)
+    access_token_table.c.requested_roles.alter(name="role_ids")
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+    request_token_table = sql.Table('request_token', meta, autoload=True)
+    request_token_table.c.role_ids.alter(nullable=False)
+    request_token_table.c.role_ids.alter(name="requested_roles")
+    access_token_table = sql.Table('access_token', meta, autoload=True)
+    access_token_table.c.role_ids.alter(name="requested_roles")
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py
new file mode 100644 (file)
index 0000000..428971f
--- /dev/null
@@ -0,0 +1,42 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sa
+
+
+def upgrade(migrate_engine):
+
+    if migrate_engine.name == 'mysql':
+        meta = sa.MetaData(bind=migrate_engine)
+        table = sa.Table('access_token', meta, autoload=True)
+
+        # NOTE(i159): MySQL requires indexes on referencing columns and
+        # creates those indexes automatically, but the generated names
+        # differ between MySQL versions. We should make the naming
+        # consistent by recreating the index under a fixed name.
+        if any(i for i in table.indexes if i.columns.keys() == ['consumer_id']
+               and i.name != 'consumer_id'):
+            # NOTE(i159): this re-creates the index under the new name,
+            # which amounts to a rename under MySQL's rules.
+            sa.Index('consumer_id', table.c.consumer_id).create()
+
+
+def downgrade(migrate_engine):
+    # NOTE(i159): the index exists only in MySQL schemas, and it only got an
+    # inconsistent name when MySQL 5.5 renamed it during re-creation (in the
+    # course of migrations). The upgrade merely fixes that inconsistency,
+    # so there is nothing to revert.
+    pass
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/__init__.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/oauth1/routers.py b/keystone-moon/keystone/contrib/oauth1/routers.py
new file mode 100644 (file)
index 0000000..35619ed
--- /dev/null
@@ -0,0 +1,154 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.contrib.oauth1 import controllers
+
+
+build_resource_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-OAUTH1', extension_version='1.0')
+
+build_parameter_relation = functools.partial(
+    json_home.build_v3_extension_parameter_relation,
+    extension_name='OS-OAUTH1', extension_version='1.0')
+
+ACCESS_TOKEN_ID_PARAMETER_RELATION = build_parameter_relation(
+    parameter_name='access_token_id')
+
+
+class OAuth1Extension(wsgi.V3ExtensionRouter):
+    """API Endpoints for the OAuth1 extension.
+
+    The goal of this extension is to allow third-party service providers
+    to acquire tokens with a limited subset of a user's roles for acting
+    on behalf of that user. This is done using an OAuth-like flow and
+    API.
+
+    The API looks like::
+
+      # Basic admin-only consumer crud
+      POST /OS-OAUTH1/consumers
+      GET /OS-OAUTH1/consumers
+      PATCH /OS-OAUTH1/consumers/$consumer_id
+      GET /OS-OAUTH1/consumers/$consumer_id
+      DELETE /OS-OAUTH1/consumers/$consumer_id
+
+      # User access token crud
+      GET /users/$user_id/OS-OAUTH1/access_tokens
+      GET /users/$user_id/OS-OAUTH1/access_tokens/$access_token_id
+      GET /users/$user_id/OS-OAUTH1/access_tokens/$access_token_id/roles
+      GET /users/$user_id/OS-OAUTH1/access_tokens
+          /$access_token_id/roles/$role_id
+      DELETE /users/$user_id/OS-OAUTH1/access_tokens/$access_token_id
+
+      # OAuth interfaces
+      POST /OS-OAUTH1/request_token  # create a request token
+      PUT /OS-OAUTH1/authorize  # authorize a request token
+      POST /OS-OAUTH1/access_token  # create an access token
+
+    """
+
+    def add_routes(self, mapper):
+        consumer_controller = controllers.ConsumerCrudV3()
+        access_token_controller = controllers.AccessTokenCrudV3()
+        access_token_roles_controller = controllers.AccessTokenRolesV3()
+        oauth_controller = controllers.OAuthControllerV3()
+
+        # basic admin-only consumer crud
+        self._add_resource(
+            mapper, consumer_controller,
+            path='/OS-OAUTH1/consumers',
+            get_action='list_consumers',
+            post_action='create_consumer',
+            rel=build_resource_relation(resource_name='consumers'))
+        self._add_resource(
+            mapper, consumer_controller,
+            path='/OS-OAUTH1/consumers/{consumer_id}',
+            get_action='get_consumer',
+            patch_action='update_consumer',
+            delete_action='delete_consumer',
+            rel=build_resource_relation(resource_name='consumer'),
+            path_vars={
+                'consumer_id':
+                build_parameter_relation(parameter_name='consumer_id'),
+            })
+
+        # user access token crud
+        self._add_resource(
+            mapper, access_token_controller,
+            path='/users/{user_id}/OS-OAUTH1/access_tokens',
+            get_action='list_access_tokens',
+            rel=build_resource_relation(resource_name='user_access_tokens'),
+            path_vars={
+                'user_id': json_home.Parameters.USER_ID,
+            })
+        self._add_resource(
+            mapper, access_token_controller,
+            path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}',
+            get_action='get_access_token',
+            delete_action='delete_access_token',
+            rel=build_resource_relation(resource_name='user_access_token'),
+            path_vars={
+                'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
+                'user_id': json_home.Parameters.USER_ID,
+            })
+        self._add_resource(
+            mapper, access_token_roles_controller,
+            path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/'
+            'roles',
+            get_action='list_access_token_roles',
+            rel=build_resource_relation(
+                resource_name='user_access_token_roles'),
+            path_vars={
+                'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
+                'user_id': json_home.Parameters.USER_ID,
+            })
+        self._add_resource(
+            mapper, access_token_roles_controller,
+            path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/'
+            'roles/{role_id}',
+            get_action='get_access_token_role',
+            rel=build_resource_relation(
+                resource_name='user_access_token_role'),
+            path_vars={
+                'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
+                'role_id': json_home.Parameters.ROLE_ID,
+                'user_id': json_home.Parameters.USER_ID,
+            })
+
+        # oauth flow calls
+        self._add_resource(
+            mapper, oauth_controller,
+            path='/OS-OAUTH1/request_token',
+            post_action='create_request_token',
+            rel=build_resource_relation(resource_name='request_tokens'))
+        self._add_resource(
+            mapper, oauth_controller,
+            path='/OS-OAUTH1/access_token',
+            post_action='create_access_token',
+            rel=build_resource_relation(resource_name='access_tokens'))
+        self._add_resource(
+            mapper, oauth_controller,
+            path='/OS-OAUTH1/authorize/{request_token_id}',
+            path_vars={
+                'request_token_id':
+                build_parameter_relation(parameter_name='request_token_id')
+            },
+            put_action='authorize_request_token',
+            rel=build_resource_relation(
+                resource_name='authorize_request_token'))
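+
+# A typical delegation flow against the endpoints above (illustrative; the
+# OAuth signing details are handled by a client library such as oauthlib,
+# and the paths assume the extension is mounted under the v3 API):
+#
+#   1. POST /v3/OS-OAUTH1/request_token   -- signed with the consumer key and
+#      secret; a Requested-Project-Id header names the target project.
+#   2. PUT  /v3/OS-OAUTH1/authorize/{request_token_id}   -- performed by the
+#      delegating user; returns an oauth_verifier.
+#   3. POST /v3/OS-OAUTH1/access_token   -- signed with the request token and
+#      verifier; the resulting access token can then be used to obtain
+#      regular keystone tokens carrying the delegated roles.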
diff --git a/keystone-moon/keystone/contrib/oauth1/validator.py b/keystone-moon/keystone/contrib/oauth1/validator.py
new file mode 100644 (file)
index 0000000..8f44059
--- /dev/null
@@ -0,0 +1,179 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oAuthlib request validator."""
+
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone.contrib.oauth1 import core as oauth1
+from keystone import exception
+
+
+METHOD_NAME = 'oauth_validator'
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('oauth_api')
+class OAuthValidator(oauth1.RequestValidator):
+
+    # TODO(mhu) set as option probably?
+    @property
+    def enforce_ssl(self):
+        return False
+
+    @property
+    def safe_characters(self):
+        # oauth tokens are generated from a uuid hex value
+        return set("abcdef0123456789")
+
+    def _check_token(self, token):
+        # Generic verification for tokens generated from a uuid hex value.
+        return (set(token) <= self.safe_characters and
+                len(token) == 32)
+
+    def check_client_key(self, client_key):
+        return self._check_token(client_key)
+
+    def check_request_token(self, request_token):
+        return self._check_token(request_token)
+
+    def check_access_token(self, access_token):
+        return self._check_token(access_token)
+
+    def check_nonce(self, nonce):
+        # Assuming length is not a concern
+        return set(nonce) <= self.safe_characters
+
+    def check_verifier(self, verifier):
+        return (all(i in oauth1.VERIFIER_CHARS for i in verifier) and
+                len(verifier) == 8)
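+
+    # A minimal sanity sketch (illustrative, not part of the class API):
+    # values produced by uuid.uuid4().hex are 32 lowercase hex characters,
+    # so they satisfy _check_token(), while anything of a different length
+    # or containing characters outside [0-9a-f] is rejected.
+    #
+    #   >>> import uuid
+    #   >>> OAuthValidator()._check_token(uuid.uuid4().hex)
+    #   True
+    #   >>> OAuthValidator()._check_token('not-a-valid-token')
+    #   False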
+
+    def get_client_secret(self, client_key, request):
+        client = self.oauth_api.get_consumer_with_secret(client_key)
+        return client['secret']
+
+    def get_request_token_secret(self, client_key, token, request):
+        token_ref = self.oauth_api.get_request_token(token)
+        return token_ref['request_secret']
+
+    def get_access_token_secret(self, client_key, token, request):
+        access_token = self.oauth_api.get_access_token(token)
+        return access_token['access_secret']
+
+    def get_default_realms(self, client_key, request):
+        # realms weren't implemented with the previous library
+        return []
+
+    def get_realms(self, token, request):
+        return []
+
+    def get_redirect_uri(self, token, request):
+        # OOB (out of band) is supposed to be the default value to use
+        return 'oob'
+
+    def get_rsa_key(self, client_key, request):
+        # HMAC signing is used, so return a dummy value
+        return ''
+
+    def invalidate_request_token(self, client_key, request_token, request):
+        # This method is invoked when an access token is generated out of a
+        # request token, to make sure the request token cannot be consumed
+        # anymore. This is done in the backend, so we do nothing here.
+        pass
+
+    def validate_client_key(self, client_key, request):
+        try:
+            return self.oauth_api.get_consumer(client_key) is not None
+        except exception.NotFound:
+            return False
+
+    def validate_request_token(self, client_key, token, request):
+        try:
+            return self.oauth_api.get_request_token(token) is not None
+        except exception.NotFound:
+            return False
+
+    def validate_access_token(self, client_key, token, request):
+        try:
+            return self.oauth_api.get_access_token(token) is not None
+        except exception.NotFound:
+            return False
+
+    def validate_timestamp_and_nonce(self,
+                                     client_key,
+                                     timestamp,
+                                     nonce,
+                                     request,
+                                     request_token=None,
+                                     access_token=None):
+        return True
+
+    def validate_redirect_uri(self, client_key, redirect_uri, request):
+        # we expect OOB, so we don't really care
+        return True
+
+    def validate_requested_realms(self, client_key, realms, request):
+        # realms are not used
+        return True
+
+    def validate_realms(self,
+                        client_key,
+                        token,
+                        request,
+                        uri=None,
+                        realms=None):
+        return True
+
+    def validate_verifier(self, client_key, token, verifier, request):
+        try:
+            req_token = self.oauth_api.get_request_token(token)
+            return req_token['verifier'] == verifier
+        except exception.NotFound:
+            return False
+
+    def verify_request_token(self, token, request):
+        # there aren't strong expectations on the request token format
+        return isinstance(token, six.string_types)
+
+    def verify_realms(self, token, realms, request):
+        return True
+
+    # The following save_XXX methods are called to create tokens. I chose to
+    # keep the original logic, but the comments below show how that could be
+    # implemented. The real implementation logic is in the backend.
+    def save_access_token(self, token, request):
+        pass
+#        token_duration = CONF.oauth1.request_token_duration
+#        request_token_id = request.client_key
+#        self.oauth_api.create_access_token(request_token_id,
+#                                           token_duration,
+#                                           token["oauth_token"],
+#                                           token["oauth_token_secret"])
+
+    def save_request_token(self, token, request):
+        pass
+#        project_id = request.headers.get('Requested-Project-Id')
+#        token_duration = CONF.oauth1.request_token_duration
+#        self.oauth_api.create_request_token(request.client_key,
+#                                            project_id,
+#                                            token_duration,
+#                                            token["oauth_token"],
+#                                            token["oauth_token_secret"])
+
+    def save_verifier(self, token, verifier, request):
+        # keep the old logic for this, as it is done in two steps and requires
+        # information that the request validator has no access to
+        pass
diff --git a/keystone-moon/keystone/contrib/revoke/__init__.py b/keystone-moon/keystone/contrib/revoke/__init__.py
new file mode 100644 (file)
index 0000000..58ba68d
--- /dev/null
@@ -0,0 +1,13 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.revoke.core import *  # noqa
diff --git a/keystone-moon/keystone/contrib/revoke/backends/__init__.py b/keystone-moon/keystone/contrib/revoke/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/revoke/backends/kvs.py b/keystone-moon/keystone/contrib/revoke/backends/kvs.py
new file mode 100644 (file)
index 0000000..cc41fbe
--- /dev/null
@@ -0,0 +1,73 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo_config import cfg
+from oslo_utils import timeutils
+
+from keystone.common import kvs
+from keystone.contrib import revoke
+from keystone import exception
+from keystone.openstack.common import versionutils
+
+
+CONF = cfg.CONF
+
+_EVENT_KEY = 'os-revoke-events'
+_KVS_BACKEND = 'openstack.kvs.Memory'
+
+
+class Revoke(revoke.Driver):
+
+    @versionutils.deprecated(
+        versionutils.deprecated.JUNO,
+        in_favor_of='keystone.contrib.revoke.backends.sql',
+        remove_in=+1,
+        what='keystone.contrib.revoke.backends.kvs')
+    def __init__(self, **kwargs):
+        super(Revoke, self).__init__()
+        self._store = kvs.get_key_value_store('os-revoke-driver')
+        self._store.configure(backing_store=_KVS_BACKEND, **kwargs)
+
+    def _list_events(self):
+        try:
+            return self._store.get(_EVENT_KEY)
+        except exception.NotFound:
+            return []
+
+    def _prune_expired_events_and_get(self, last_fetch=None, new_event=None):
+        pruned = []
+        results = []
+        expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
+        oldest = timeutils.utcnow() - expire_delta
+        # TODO(ayoung): Store the time of the oldest event so that the
+        # prune process can be skipped if none of the events have timed out.
+        with self._store.get_lock(_EVENT_KEY) as lock:
+            events = self._list_events()
+            if new_event is not None:
+                events.append(new_event)
+
+            for event in events:
+                revoked_at = event.revoked_at
+                if revoked_at > oldest:
+                    pruned.append(event)
+                    if last_fetch is None or revoked_at > last_fetch:
+                        results.append(event)
+            self._store.set(_EVENT_KEY, pruned, lock)
+        return results
+
+    def list_events(self, last_fetch=None):
+        return self._prune_expired_events_and_get(last_fetch=last_fetch)
+
+    def revoke(self, event):
+        self._prune_expired_events_and_get(new_event=event)
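+
+# Note: this backend prunes on every read and write; list_events() and
+# revoke() both funnel through _prune_expired_events_and_get(), so events
+# older than the CONF.token.expiration window are discarded whenever the
+# store is read or written.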
diff --git a/keystone-moon/keystone/contrib/revoke/backends/sql.py b/keystone-moon/keystone/contrib/revoke/backends/sql.py
new file mode 100644 (file)
index 0000000..1b0cde1
--- /dev/null
@@ -0,0 +1,104 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystone.common import sql
+from keystone.contrib import revoke
+from keystone.contrib.revoke import model
+
+
+class RevocationEvent(sql.ModelBase, sql.ModelDictMixin):
+    __tablename__ = 'revocation_event'
+    attributes = model.REVOKE_KEYS
+
+    # The id field is not going to be exposed to the outside world.
+    # It is, however, necessary for SQLAlchemy.
+    id = sql.Column(sql.String(64), primary_key=True)
+    domain_id = sql.Column(sql.String(64))
+    project_id = sql.Column(sql.String(64))
+    user_id = sql.Column(sql.String(64))
+    role_id = sql.Column(sql.String(64))
+    trust_id = sql.Column(sql.String(64))
+    consumer_id = sql.Column(sql.String(64))
+    access_token_id = sql.Column(sql.String(64))
+    issued_before = sql.Column(sql.DateTime(), nullable=False)
+    expires_at = sql.Column(sql.DateTime())
+    revoked_at = sql.Column(sql.DateTime(), nullable=False)
+    audit_id = sql.Column(sql.String(32))
+    audit_chain_id = sql.Column(sql.String(32))
+
+
+class Revoke(revoke.Driver):
+    def _flush_batch_size(self, dialect):
+        batch_size = 0
+        if dialect == 'ibm_db_sa':
+            # This functionality is limited to DB2, where batching is
+            # needed to keep the transaction log from filling up. At
+            # least some of the other supported databases neither
+            # support DELETE queries with LIMIT subqueries nor appear
+            # to require them when deleting large numbers of records
+            # at once.
+            batch_size = 100
+            # Limit of 100 is known to not fill a transaction log
+            # of default maximum size while not significantly
+            # impacting the performance of large token purges on
+            # systems where the maximum transaction log size has
+            # been increased beyond the default.
+        return batch_size
+
+    def _prune_expired_events(self):
+        oldest = revoke.revoked_before_cutoff_time()
+
+        session = sql.get_session()
+        dialect = session.bind.dialect.name
+        batch_size = self._flush_batch_size(dialect)
+        if batch_size > 0:
+            query = session.query(RevocationEvent.id)
+            query = query.filter(RevocationEvent.revoked_at < oldest)
+            query = query.limit(batch_size).subquery()
+            delete_query = (session.query(RevocationEvent).
+                            filter(RevocationEvent.id.in_(query)))
+            while True:
+                rowcount = delete_query.delete(synchronize_session=False)
+                if rowcount == 0:
+                    break
+        else:
+            query = session.query(RevocationEvent)
+            query = query.filter(RevocationEvent.revoked_at < oldest)
+            query.delete(synchronize_session=False)
+
+        session.flush()
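+
+        # On DB2 the batched path above issues statements of roughly this
+        # shape (illustrative SQL):
+        #
+        #   DELETE FROM revocation_event
+        #   WHERE id IN (SELECT id FROM revocation_event
+        #                WHERE revoked_at < :oldest
+        #                LIMIT 100)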
+
+    def list_events(self, last_fetch=None):
+        self._prune_expired_events()
+        session = sql.get_session()
+        query = session.query(RevocationEvent).order_by(
+            RevocationEvent.revoked_at)
+
+        if last_fetch:
+            query = query.filter(RevocationEvent.revoked_at > last_fetch)
+
+        events = [model.RevokeEvent(**e.to_dict()) for e in query]
+
+        return events
+
+    def revoke(self, event):
+        kwargs = dict()
+        for attr in model.REVOKE_KEYS:
+            kwargs[attr] = getattr(event, attr)
+        kwargs['id'] = uuid.uuid4().hex
+        record = RevocationEvent(**kwargs)
+        session = sql.get_session()
+        with session.begin():
+            session.add(record)
diff --git a/keystone-moon/keystone/contrib/revoke/controllers.py b/keystone-moon/keystone/contrib/revoke/controllers.py
new file mode 100644 (file)
index 0000000..40151ba
--- /dev/null
@@ -0,0 +1,44 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import timeutils
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone import exception
+from keystone.i18n import _
+
+
+@dependency.requires('revoke_api')
+class RevokeController(controller.V3Controller):
+    @controller.protected()
+    def list_revoke_events(self, context):
+        since = context['query_string'].get('since')
+        last_fetch = None
+        if since:
+            try:
+                last_fetch = timeutils.normalize_time(
+                    timeutils.parse_isotime(since))
+            except ValueError:
+                raise exception.ValidationError(
+                    message=_('invalid date format %s') % since)
+        events = self.revoke_api.list_events(last_fetch=last_fetch)
+        # Build the links by hand as the standard controller calls require ids
+        response = {'events': [event.to_dict() for event in events],
+                    'links': {
+                        'next': None,
+                        'self': RevokeController.base_url(
+                            context,
+                            path=context['path']),
+                        'previous': None}
+                    }
+        return response
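+
+# Example exchange (illustrative):
+#
+#   GET /v3/OS-REVOKE/events?since=2015-06-30T16:00:00Z
+#
+#   {"events": [{"user_id": "...", "issued_before": "..."}],
+#    "links": {"next": null, "previous": null, "self": "..."}}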
diff --git a/keystone-moon/keystone/contrib/revoke/core.py b/keystone-moon/keystone/contrib/revoke/core.py
new file mode 100644 (file)
index 0000000..c733569
--- /dev/null
@@ -0,0 +1,250 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import datetime
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import manager
+from keystone.contrib.revoke import model
+from keystone import exception
+from keystone.i18n import _
+from keystone import notifications
+from keystone.openstack.common import versionutils
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+EXTENSION_DATA = {
+    'name': 'OpenStack Revoke API',
+    'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                 'OS-REVOKE/v1.0',
+    'alias': 'OS-REVOKE',
+    'updated': '2014-02-24T20:51:0-00:00',
+    'description': 'OpenStack revoked token reporting mechanism.',
+    'links': [
+        {
+            'rel': 'describedby',
+            'type': 'text/html',
+            'href': ('https://github.com/openstack/identity-api/blob/master/'
+                     'openstack-identity-api/v3/src/markdown/'
+                     'identity-api-v3-os-revoke-ext.md'),
+        }
+    ]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+
+MEMOIZE = cache.get_memoization_decorator(section='revoke')
+
+
+def revoked_before_cutoff_time():
+    expire_delta = datetime.timedelta(
+        seconds=CONF.token.expiration + CONF.revoke.expiration_buffer)
+    oldest = timeutils.utcnow() - expire_delta
+    return oldest
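+
+# For example (illustrative values): with token.expiration = 3600 and
+# revoke.expiration_buffer = 1800, any event revoked more than 5400 seconds
+# ago can no longer affect a live token, so it is safe to prune.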
+
+
+@dependency.provider('revoke_api')
+class Manager(manager.Manager):
+    """Revoke API Manager.
+
+    Performs common logic for recording revocations.
+
+    """
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.revoke.driver)
+        self._register_listeners()
+        self.model = model
+
+    def _user_callback(self, service, resource_type, operation,
+                       payload):
+        self.revoke_by_user(payload['resource_info'])
+
+    def _role_callback(self, service, resource_type, operation,
+                       payload):
+        self.revoke(
+            model.RevokeEvent(role_id=payload['resource_info']))
+
+    def _project_callback(self, service, resource_type, operation,
+                          payload):
+        self.revoke(
+            model.RevokeEvent(project_id=payload['resource_info']))
+
+    def _domain_callback(self, service, resource_type, operation,
+                         payload):
+        self.revoke(
+            model.RevokeEvent(domain_id=payload['resource_info']))
+
+    def _trust_callback(self, service, resource_type, operation,
+                        payload):
+        self.revoke(
+            model.RevokeEvent(trust_id=payload['resource_info']))
+
+    def _consumer_callback(self, service, resource_type, operation,
+                           payload):
+        self.revoke(
+            model.RevokeEvent(consumer_id=payload['resource_info']))
+
+    def _access_token_callback(self, service, resource_type, operation,
+                               payload):
+        self.revoke(
+            model.RevokeEvent(access_token_id=payload['resource_info']))
+
+    def _group_callback(self, service, resource_type, operation, payload):
+        user_ids = (u['id'] for u in self.identity_api.list_users_in_group(
+            payload['resource_info']))
+        for uid in user_ids:
+            self.revoke(model.RevokeEvent(user_id=uid))
+
+    def _register_listeners(self):
+        callbacks = {
+            notifications.ACTIONS.deleted: [
+                ['OS-TRUST:trust', self._trust_callback],
+                ['OS-OAUTH1:consumer', self._consumer_callback],
+                ['OS-OAUTH1:access_token', self._access_token_callback],
+                ['role', self._role_callback],
+                ['user', self._user_callback],
+                ['project', self._project_callback],
+            ],
+            notifications.ACTIONS.disabled: [
+                ['user', self._user_callback],
+                ['project', self._project_callback],
+                ['domain', self._domain_callback],
+            ],
+            notifications.ACTIONS.internal: [
+                [notifications.INVALIDATE_USER_TOKEN_PERSISTENCE,
+                 self._user_callback],
+            ]
+        }
+
+        for event, cb_info in six.iteritems(callbacks):
+            for resource_type, callback_fns in cb_info:
+                notifications.register_event_callback(event, resource_type,
+                                                      callback_fns)
+
+    def revoke_by_user(self, user_id):
+        return self.revoke(model.RevokeEvent(user_id=user_id))
+
+    def _assert_not_domain_and_project_scoped(self, domain_id=None,
+                                              project_id=None):
+        if domain_id is not None and project_id is not None:
+            msg = _('The revoke call must not have both domain_id and '
+                    'project_id. This is a bug in the Keystone server. The '
+                    'current request is aborted.')
+            raise exception.UnexpectedError(exception=msg)
+
+    @versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
+                             remove_in=0)
+    def revoke_by_expiration(self, user_id, expires_at,
+                             domain_id=None, project_id=None):
+
+        self._assert_not_domain_and_project_scoped(domain_id=domain_id,
+                                                   project_id=project_id)
+
+        self.revoke(
+            model.RevokeEvent(user_id=user_id,
+                              expires_at=expires_at,
+                              domain_id=domain_id,
+                              project_id=project_id))
+
+    def revoke_by_audit_id(self, audit_id):
+        self.revoke(model.RevokeEvent(audit_id=audit_id))
+
+    def revoke_by_audit_chain_id(self, audit_chain_id, project_id=None,
+                                 domain_id=None):
+
+        self._assert_not_domain_and_project_scoped(domain_id=domain_id,
+                                                   project_id=project_id)
+
+        self.revoke(model.RevokeEvent(audit_chain_id=audit_chain_id,
+                                      domain_id=domain_id,
+                                      project_id=project_id))
+
+    def revoke_by_grant(self, role_id, user_id=None,
+                        domain_id=None, project_id=None):
+        self.revoke(
+            model.RevokeEvent(user_id=user_id,
+                              role_id=role_id,
+                              domain_id=domain_id,
+                              project_id=project_id))
+
+    def revoke_by_user_and_project(self, user_id, project_id):
+        self.revoke(
+            model.RevokeEvent(project_id=project_id, user_id=user_id))
+
+    def revoke_by_project_role_assignment(self, project_id, role_id):
+        self.revoke(model.RevokeEvent(project_id=project_id, role_id=role_id))
+
+    def revoke_by_domain_role_assignment(self, domain_id, role_id):
+        self.revoke(model.RevokeEvent(domain_id=domain_id, role_id=role_id))
+
+    @MEMOIZE
+    def _get_revoke_tree(self):
+        events = self.driver.list_events()
+        revoke_tree = model.RevokeTree(revoke_events=events)
+
+        return revoke_tree
+
+    def check_token(self, token_values):
+        """Checks the values from a token against the revocation list
+
+        :param  token_values: dictionary of values from a token,
+         normalized for differences between v2 and v3. The checked values are a
+         subset of the attributes of model.TokenEvent
+
+        :raises exception.TokenNotFound: if the token is invalid
+
+         """
+        if self._get_revoke_tree().is_revoked(token_values):
+            raise exception.TokenNotFound(_('Failed to validate token'))
+
+    def revoke(self, event):
+        self.driver.revoke(event)
+        self._get_revoke_tree.invalidate(self)
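+
+    # Cache interplay (summary): _get_revoke_tree() is memoized via the
+    # 'revoke' cache section, so repeated check_token() calls reuse a single
+    # RevokeTree built from driver.list_events(); each revoke() invalidates
+    # that cache entry, forcing the next check to rebuild the tree.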
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    """Interface for recording and reporting revocation events."""
+
+    @abc.abstractmethod
+    def list_events(self, last_fetch=None):
+        """return the revocation events, as a list of objects
+
+        :param last_fetch:   Time of last fetch.  Return all events newer.
+        :returns: A list of keystone.contrib.revoke.model.RevokeEvent
+                  newer than `last_fetch.`
+                  If no last_fetch is specified, returns all events
+                  for tokens issued after the expiration cutoff.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def revoke(self, event):
+        """register a revocation event
+
+        :param event: An instance of
+            keystone.contrib.revoke.model.RevocationEvent
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/__init__.py b/keystone-moon/keystone/contrib/revoke/migrate_repo/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/migrate.cfg b/keystone-moon/keystone/contrib/revoke/migrate_repo/migrate.cfg
new file mode 100644 (file)
index 0000000..0e61bca
--- /dev/null
@@ -0,0 +1,25 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=revoke
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
+
+# When creating new change scripts, Migrate will stamp the new script with
+# a version number. By default this is latest_version + 1. You can set this
+# to 'true' to tell Migrate to use the UTC timestamp instead.
+use_timestamp_numbering=False
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py
new file mode 100644 (file)
index 0000000..7927ce0
--- /dev/null
@@ -0,0 +1,47 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine; bind
+    # migrate_engine to your metadata
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    service_table = sql.Table(
+        'revocation_event',
+        meta,
+        sql.Column('id', sql.String(64), primary_key=True),
+        sql.Column('domain_id', sql.String(64)),
+        sql.Column('project_id', sql.String(64)),
+        sql.Column('user_id', sql.String(64)),
+        sql.Column('role_id', sql.String(64)),
+        sql.Column('trust_id', sql.String(64)),
+        sql.Column('consumer_id', sql.String(64)),
+        sql.Column('access_token_id', sql.String(64)),
+        sql.Column('issued_before', sql.DateTime(), nullable=False),
+        sql.Column('expires_at', sql.DateTime()),
+        sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False))
+    service_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    tables = ['revocation_event']
+    for t in tables:
+        table = sql.Table(t, meta, autoload=True)
+        table.drop(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py
new file mode 100644 (file)
index 0000000..bee6fb2
--- /dev/null
@@ -0,0 +1,37 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+_TABLE_NAME = 'revocation_event'
+
+
+def upgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    event_table = sql.Table(_TABLE_NAME, meta, autoload=True)
+    audit_id_column = sql.Column('audit_id', sql.String(32), nullable=True)
+    audit_chain_column = sql.Column('audit_chain_id', sql.String(32),
+                                    nullable=True)
+    event_table.create_column(audit_id_column)
+    event_table.create_column(audit_chain_column)
+
+
+def downgrade(migrate_engine):
+    meta = sql.MetaData()
+    meta.bind = migrate_engine
+
+    event_table = sql.Table(_TABLE_NAME, meta, autoload=True)
+    event_table.drop_column('audit_id')
+    event_table.drop_column('audit_chain_id')
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/__init__.py b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/contrib/revoke/model.py b/keystone-moon/keystone/contrib/revoke/model.py
new file mode 100644 (file)
index 0000000..5e92042
--- /dev/null
@@ -0,0 +1,365 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import timeutils
+
+
+# The set of attributes common between the RevokeEvent
+# and the dictionaries created from the token Data.
+_NAMES = ['trust_id',
+          'consumer_id',
+          'access_token_id',
+          'audit_id',
+          'audit_chain_id',
+          'expires_at',
+          'domain_id',
+          'project_id',
+          'user_id',
+          'role_id']
+
+
+# Additional arguments for creating a RevokeEvent
+_EVENT_ARGS = ['issued_before', 'revoked_at']
+
+# Names of attributes in the RevokeEvent, including "virtual" attributes.
+# Virtual attributes are those added based on other values.
+_EVENT_NAMES = _NAMES + ['domain_scope_id']
+
+# Values that will be in the token data but not in the event.
+# These will be compared with event values that have different names.
+# For example, both trustor_id and trustee_id are compared against user_id.
+_TOKEN_KEYS = ['identity_domain_id',
+               'assignment_domain_id',
+               'issued_at',
+               'trustor_id',
+               'trustee_id']
+
+
+REVOKE_KEYS = _NAMES + _EVENT_ARGS
+
+
+def blank_token_data(issued_at):
+    token_data = dict()
+    for name in _NAMES:
+        token_data[name] = None
+    for name in _TOKEN_KEYS:
+        token_data[name] = None
+    # required field
+    token_data['issued_at'] = issued_at
+    return token_data
+
+
+class RevokeEvent(object):
+    def __init__(self, **kwargs):
+        for k in REVOKE_KEYS:
+            v = kwargs.get(k, None)
+            setattr(self, k, v)
+
+        if self.domain_id and self.expires_at:
+            # This is revoking a domain-scoped token.
+            self.domain_scope_id = self.domain_id
+            self.domain_id = None
+        else:
+            # This is revoking all tokens for a domain.
+            self.domain_scope_id = None
+
+        if self.expires_at is not None:
+            # Trim the microseconds off the expiration time because MySQL
+            # timestamps are only accurate to the second.
+            self.expires_at = self.expires_at.replace(microsecond=0)
+
+        if self.revoked_at is None:
+            self.revoked_at = timeutils.utcnow()
+        if self.issued_before is None:
+            self.issued_before = self.revoked_at
+
+    def to_dict(self):
+        keys = ['user_id',
+                'role_id',
+                'domain_id',
+                'domain_scope_id',
+                'project_id',
+                'audit_id',
+                'audit_chain_id',
+                ]
+        event = {key: self.__dict__[key] for key in keys
+                 if self.__dict__[key] is not None}
+        if self.trust_id is not None:
+            event['OS-TRUST:trust_id'] = self.trust_id
+        if self.consumer_id is not None:
+            event['OS-OAUTH1:consumer_id'] = self.consumer_id
+        if self.access_token_id is not None:
+            event['OS-OAUTH1:access_token_id'] = self.access_token_id
+        if self.expires_at is not None:
+            event['expires_at'] = timeutils.isotime(self.expires_at)
+        if self.issued_before is not None:
+            event['issued_before'] = timeutils.isotime(self.issued_before,
+                                                       subsecond=True)
+        return event
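+        # Example output (illustrative): revoking all of one user's tokens
+        # serializes to something like
+        #   {'user_id': 'u1',
+        #    'issued_before': '2015-06-30T16:47:29.000000Z'}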
+
+    def key_for_name(self, name):
+        return "%s=%s" % (name, getattr(self, name) or '*')
+
+
+def attr_keys(event):
+    return map(event.key_for_name, _EVENT_NAMES)
+
+
+class RevokeTree(object):
+    """Fast Revocation Checking Tree Structure
+
+    The Tree is an index to quickly match tokens against events.
+    Each node is a hashtable of key=value combinations from revocation events.
+    The
+
+    """
+
+    def __init__(self, revoke_events=None):
+        self.revoke_map = dict()
+        self.add_events(revoke_events)
+
+    def add_event(self, event):
+        """Updates the tree based on a revocation event.
+
+        Creates any necessary internal nodes in the tree corresponding to the
+        fields of the revocation event.  The leaf node will always be set to
+        the latest 'issued_before' for events that are otherwise identical.
+
+        :param event: Event to add to the tree.
+
+        :returns: the event that was passed in.
+
+        """
+        revoke_map = self.revoke_map
+        for key in attr_keys(event):
+            revoke_map = revoke_map.setdefault(key, {})
+        revoke_map['issued_before'] = max(
+            event.issued_before, revoke_map.get(
+                'issued_before', event.issued_before))
+        return event
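+        # For instance (illustrative), revoking by user id alone produces a
+        # chain of single-key dicts, one level per name in _EVENT_NAMES, with
+        # every other attribute wildcarded:
+        #
+        #   {'trust_id=*': {'consumer_id=*': ... {'user_id=<uid>':
+        #       {'role_id=*': {'domain_scope_id=*':
+        #           {'issued_before': <datetime>}}}} ... }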
+
+    def remove_event(self, event):
+        """Update the tree based on the removal of a Revocation Event
+
+        Removes empty nodes from the tree from the leaf back to the root.
+
+        If multiple events trace the same path but have different
+        'issued_before' values, only the last is ever stored in the tree,
+        so only an exact match on 'issued_before' ever triggers a removal.
+
+        :param event: Event to remove from the tree.
+
+        """
+        stack = []
+        revoke_map = self.revoke_map
+        for name in _EVENT_NAMES:
+            key = event.key_for_name(name)
+            nxt = revoke_map.get(key)
+            if nxt is None:
+                break
+            stack.append((revoke_map, key, nxt))
+            revoke_map = nxt
+        else:
+            if event.issued_before == revoke_map['issued_before']:
+                revoke_map.pop('issued_before')
+        for parent, key, child in reversed(stack):
+            if not any(child):
+                del parent[key]
+
+    def add_events(self, revoke_events):
+        return map(self.add_event, revoke_events or [])
+
+    def is_revoked(self, token_data):
+        """Check if a token matches the revocation event
+
+        Compare the values for each level of the tree with the values from
+        the token, accounting for attributes that have alternative
+        keys, and for wildcard matches.
+        if there is a match, continue down the tree.
+        if there is no match, exit early.
+
+        token_data is a map based on a flattened view of token.
+        The required fields are:
+
+           'expires_at','user_id', 'project_id', 'identity_domain_id',
+           'assignment_domain_id', 'trust_id', 'trustor_id', 'trustee_id'
+           'consumer_id', 'access_token_id'
+
+        """
+        # Alternative names to be checked in token for every field in
+        # revoke tree.
+        alternatives = {
+            'user_id': ['user_id', 'trustor_id', 'trustee_id'],
+            'domain_id': ['identity_domain_id', 'assignment_domain_id'],
+            # For a domain-scoped token, the domain is in assignment_domain_id.
+            'domain_scope_id': ['assignment_domain_id', ],
+        }
+        # Contains current forest (collection of trees) to be checked.
+        partial_matches = [self.revoke_map]
+        # We iterate over every layer of our revoke tree (except the last one).
+        for name in _EVENT_NAMES:
+            # bundle is the set of partial matches for the next level down
+            # the tree
+            bundle = []
+            wildcard = '%s=*' % (name,)
+            # For every tree in current forest.
+            for tree in partial_matches:
+                # If there is a wildcard node on the current level, take it.
+                bundle.append(tree.get(wildcard))
+                if name == 'role_id':
+                    # Roles are very special since a token has a list of them.
+                    # If the revocation event matches any one of them,
+                    # revoke the token.
+                    for role_id in token_data.get('roles', []):
+                        bundle.append(tree.get('role_id=%s' % role_id))
+                else:
+                    # For other fields, follow any branch that matches one of
+                    # the token's alternative field values.
+                    for alt_name in alternatives.get(name, [name]):
+                        bundle.append(
+                            tree.get('%s=%s' % (name, token_data[alt_name])))
+            # tree.get returns `None` if there is no match, so `bundle.append`
+            # adds a `None` entry. This call removes the `None` entries.
+            partial_matches = [x for x in bundle if x is not None]
+            if not partial_matches:
+                # If we end up with no branches to follow, the token is
+                # definitely not in the revoke tree and any further
+                # iteration would be pointless.
+                return False
+
+        # The last (leaf) level is checked in a special way: the leaf's
+        # 'issued_before' is compared against the token's 'issued_at'.
+        for leaf in partial_matches:
+            try:
+                if leaf['issued_before'] > token_data['issued_at']:
+                    return True
+            except KeyError:
+                pass
+        # If we made it out of the loop, then no element in the revocation
+        # tree corresponds to our token, and it is still good.
+        return False
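+
+# A minimal usage sketch (illustrative; the ids are made up):
+#
+#   tree = RevokeTree()
+#   token = blank_token_data(issued_at=timeutils.utcnow())
+#   token['user_id'] = 'u1'
+#   tree.add_event(RevokeEvent(user_id='u1'))
+#   tree.is_revoked(token)   # True: the token was issued before the event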
+
+
+def build_token_values_v2(access, default_domain_id):
+    token_data = access['token']
+
+    token_expires_at = timeutils.parse_isotime(token_data['expires'])
+
+    # Trim off the microseconds because the revocation event only has
+    # expirations accurate to the second.
+    token_expires_at = token_expires_at.replace(microsecond=0)
+
+    token_values = {
+        'expires_at': timeutils.normalize_time(token_expires_at),
+        'issued_at': timeutils.normalize_time(
+            timeutils.parse_isotime(token_data['issued_at'])),
+        'audit_id': token_data.get('audit_ids', [None])[0],
+        'audit_chain_id': token_data.get('audit_ids', [None])[-1],
+    }
+
+    token_values['user_id'] = access.get('user', {}).get('id')
+
+    project = token_data.get('tenant')
+    if project is not None:
+        token_values['project_id'] = project['id']
+    else:
+        token_values['project_id'] = None
+
+    token_values['identity_domain_id'] = default_domain_id
+    token_values['assignment_domain_id'] = default_domain_id
+
+    trust = token_data.get('trust')
+    if trust is None:
+        token_values['trust_id'] = None
+        token_values['trustor_id'] = None
+        token_values['trustee_id'] = None
+    else:
+        token_values['trust_id'] = trust['id']
+        token_values['trustor_id'] = trust['trustor_id']
+        token_values['trustee_id'] = trust['trustee_id']
+
+    token_values['consumer_id'] = None
+    token_values['access_token_id'] = None
+
+    role_list = []
+    # Roles are by ID in metadata and by name in the user section
+    roles = access.get('metadata', {}).get('roles', [])
+    for role in roles:
+        role_list.append(role)
+    token_values['roles'] = role_list
+    return token_values
+
+
+def build_token_values(token_data):
+
+    token_expires_at = timeutils.parse_isotime(token_data['expires_at'])
+
+    # Trim off the microseconds because the revocation event only has
+    # expirations accurate to the second.
+    token_expires_at = token_expires_at.replace(microsecond=0)
+
+    token_values = {
+        'expires_at': timeutils.normalize_time(token_expires_at),
+        'issued_at': timeutils.normalize_time(
+            timeutils.parse_isotime(token_data['issued_at'])),
+        'audit_id': token_data.get('audit_ids', [None])[0],
+        'audit_chain_id': token_data.get('audit_ids', [None])[-1],
+    }
+
+    user = token_data.get('user')
+    if user is not None:
+        token_values['user_id'] = user['id']
+        # Federated users do not have a domain; be defensive and set the
+        # user domain to None in the federated-user case.
+        token_values['identity_domain_id'] = user.get('domain', {}).get('id')
+    else:
+        token_values['user_id'] = None
+        token_values['identity_domain_id'] = None
+
+    project = token_data.get('project', token_data.get('tenant'))
+    if project is not None:
+        token_values['project_id'] = project['id']
+        token_values['assignment_domain_id'] = project['domain']['id']
+    else:
+        token_values['project_id'] = None
+
+        domain = token_data.get('domain')
+        if domain is not None:
+            token_values['assignment_domain_id'] = domain['id']
+        else:
+            token_values['assignment_domain_id'] = None
+
+    role_list = []
+    roles = token_data.get('roles')
+    if roles is not None:
+        for role in roles:
+            role_list.append(role['id'])
+    token_values['roles'] = role_list
+
+    trust = token_data.get('OS-TRUST:trust')
+    if trust is None:
+        token_values['trust_id'] = None
+        token_values['trustor_id'] = None
+        token_values['trustee_id'] = None
+    else:
+        token_values['trust_id'] = trust['id']
+        token_values['trustor_id'] = trust['trustor_user']['id']
+        token_values['trustee_id'] = trust['trustee_user']['id']
+
+    oauth1 = token_data.get('OS-OAUTH1')
+    if oauth1 is None:
+        token_values['consumer_id'] = None
+        token_values['access_token_id'] = None
+    else:
+        token_values['consumer_id'] = oauth1['consumer_id']
+        token_values['access_token_id'] = oauth1['access_token_id']
+    return token_values
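An editorial sketch of the dictionary build_token_values() produces for a hypothetical v3 token body (module path assumed; note that expires_at is truncated to whole seconds so it can be compared against revocation events):

    from keystone.contrib.revoke.model import build_token_values  # assumed path

    token_data = {
        'expires_at': '2015-07-01T12:00:00.123456Z',
        'issued_at': '2015-07-01T11:00:00.000000Z',
        'audit_ids': ['VcxU2JYqT8OzfUVvrjEITQ'],
        'user': {'id': 'u1', 'domain': {'id': 'default'}},
        'project': {'id': 'p1', 'domain': {'id': 'default'}},
        'roles': [{'id': 'r1', 'name': 'member'}],
    }
    values = build_token_values(token_data)
    # values['expires_at'] -> datetime(2015, 7, 1, 12, 0, 0), microseconds gone
    # values['audit_id'] == values['audit_chain_id'] (single audit_ids entry)
    # trust, consumer and access-token fields all default to None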
diff --git a/keystone-moon/keystone/contrib/revoke/routers.py b/keystone-moon/keystone/contrib/revoke/routers.py
new file mode 100644 (file)
index 0000000..4d2edfc
--- /dev/null
@@ -0,0 +1,29 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.contrib.revoke import controllers
+
+
+class RevokeExtension(wsgi.V3ExtensionRouter):
+
+    PATH_PREFIX = '/OS-REVOKE'
+
+    def add_routes(self, mapper):
+        revoke_controller = controllers.RevokeController()
+        self._add_resource(
+            mapper, revoke_controller,
+            path=self.PATH_PREFIX + '/events',
+            get_action='list_revoke_events',
+            rel=json_home.build_v3_extension_resource_relation(
+                'OS-REVOKE', '1.0', 'events'))
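The router exposes a single read-only resource. A hedged client sketch (host, port, and token are placeholders; the response shape follows the OS-REVOKE API, which wraps events in an 'events' list):

    import requests

    resp = requests.get('http://keystone:35357/v3/OS-REVOKE/events',
                        headers={'X-Auth-Token': admin_token})
    for event in resp.json()['events']:
        print(event.get('user_id'), event.get('issued_before'))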
diff --git a/keystone-moon/keystone/contrib/s3/__init__.py b/keystone-moon/keystone/contrib/s3/__init__.py
new file mode 100644 (file)
index 0000000..eec77c7
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.s3.core import *  # noqa
diff --git a/keystone-moon/keystone/contrib/s3/core.py b/keystone-moon/keystone/contrib/s3/core.py
new file mode 100644 (file)
index 0000000..34095bf
--- /dev/null
@@ -0,0 +1,73 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the S3 Credentials service.
+
+This service provides S3 token validation for services configured with the
+s3_token middleware to authorize S3 requests.
+
+This service uses the same credentials used by EC2. Refer to the documentation
+for the EC2 module for how to generate the required credentials.
+"""
+
+import base64
+import hashlib
+import hmac
+
+from keystone.common import extension
+from keystone.common import json_home
+from keystone.common import utils
+from keystone.common import wsgi
+from keystone.contrib.ec2 import controllers
+from keystone import exception
+
+EXTENSION_DATA = {
+    'name': 'OpenStack S3 API',
+    'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                 's3tokens/v1.0',
+    'alias': 's3tokens',
+    'updated': '2013-07-07T12:00:00-00:00',
+    'description': 'OpenStack S3 API.',
+    'links': [
+        {
+            'rel': 'describedby',
+            # TODO(ayoung): needs a description
+            'type': 'text/html',
+            'href': 'https://github.com/openstack/identity-api',
+        }
+    ]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+
+
+class S3Extension(wsgi.V3ExtensionRouter):
+    def add_routes(self, mapper):
+        controller = S3Controller()
+        # validation
+        self._add_resource(
+            mapper, controller,
+            path='/s3tokens',
+            post_action='authenticate',
+            rel=json_home.build_v3_extension_resource_relation(
+                's3tokens', '1.0', 's3tokens'))
+
+
+class S3Controller(controllers.Ec2Controller):
+    def check_signature(self, creds_ref, credentials):
+        msg = base64.urlsafe_b64decode(str(credentials['token']))
+        key = str(creds_ref['secret'])
+        signed = base64.encodestring(
+            hmac.new(key, msg, hashlib.sha1).digest()).strip()
+
+        if not utils.auth_str_equal(credentials['signature'], signed):
+            raise exception.Unauthorized('Credential signature mismatch')
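For illustration, here is the client-side counterpart of check_signature(): the S3 string-to-sign is HMAC-SHA1-signed with the EC2 secret and base64-encoded, mirroring the server-side computation above (Python 2 era, hence base64.encodestring; all values hypothetical):

    import base64
    import hashlib
    import hmac

    secret = 'ec2-secret-key'  # corresponds to creds_ref['secret']
    string_to_sign = 'GET\n\n\nWed, 01 Jul 2015 12:00:00 GMT\n/bucket/key'
    signature = base64.encodestring(
        hmac.new(secret, string_to_sign, hashlib.sha1).digest()).strip()
    # The s3_token middleware sends the urlsafe-base64 of string_to_sign as
    # 'token' together with this signature; the controller recomputes the
    # signature and compares the two with a constant-time check.
    token = base64.urlsafe_b64encode(string_to_sign)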
diff --git a/keystone-moon/keystone/contrib/simple_cert/__init__.py b/keystone-moon/keystone/contrib/simple_cert/__init__.py
new file mode 100644 (file)
index 0000000..b213192
--- /dev/null
@@ -0,0 +1,14 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.simple_cert.core import *  # noqa
+from keystone.contrib.simple_cert.routers import SimpleCertExtension  # noqa
diff --git a/keystone-moon/keystone/contrib/simple_cert/controllers.py b/keystone-moon/keystone/contrib/simple_cert/controllers.py
new file mode 100644 (file)
index 0000000..d34c03a
--- /dev/null
@@ -0,0 +1,42 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import webob
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone import exception
+
+CONF = cfg.CONF
+
+
+@dependency.requires('token_provider_api')
+class SimpleCert(controller.V3Controller):
+
+    def _get_certificate(self, name):
+        try:
+            with open(name, 'r') as f:
+                body = f.read()
+        except IOError:
+            raise exception.CertificateFilesUnavailable()
+
+        # NOTE(jamielennox): We construct the webob Response ourselves here so
+        # that we don't pass through the JSON encoding process.
+        headers = [('Content-Type', 'application/x-pem-file')]
+        return webob.Response(body=body, headerlist=headers, status="200 OK")
+
+    def get_ca_certificate(self, context):
+        return self._get_certificate(CONF.signing.ca_certs)
+
+    def list_certificates(self, context):
+        return self._get_certificate(CONF.signing.certfile)
diff --git a/keystone-moon/keystone/contrib/simple_cert/core.py b/keystone-moon/keystone/contrib/simple_cert/core.py
new file mode 100644 (file)
index 0000000..531c6aa
--- /dev/null
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import extension
+
+EXTENSION_DATA = {
+    'name': 'OpenStack Simple Certificate API',
+    'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                 'OS-SIMPLE-CERT/v1.0',
+    'alias': 'OS-SIMPLE-CERT',
+    'updated': '2014-01-20T12:00:00-00:00',
+    'description': 'OpenStack simple certificate retrieval extension',
+    'links': [
+        {
+            'rel': 'describedby',
+            # TODO(dolph): link needs to be revised after
+            #              bug 928059 merges
+            'type': 'text/html',
+            'href': 'https://github.com/openstack/identity-api',
+        }
+    ]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
diff --git a/keystone-moon/keystone/contrib/simple_cert/routers.py b/keystone-moon/keystone/contrib/simple_cert/routers.py
new file mode 100644 (file)
index 0000000..8c36c2a
--- /dev/null
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.contrib.simple_cert import controllers
+
+
+build_resource_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-SIMPLE-CERT', extension_version='1.0')
+
+
+class SimpleCertExtension(wsgi.V3ExtensionRouter):
+
+    PREFIX = 'OS-SIMPLE-CERT'
+
+    def add_routes(self, mapper):
+        controller = controllers.SimpleCert()
+
+        self._add_resource(
+            mapper, controller,
+            path='/%s/ca' % self.PREFIX,
+            get_action='get_ca_certificate',
+            rel=build_resource_relation(resource_name='ca_certificate'))
+        self._add_resource(
+            mapper, controller,
+            path='/%s/certificates' % self.PREFIX,
+            get_action='list_certificates',
+            rel=build_resource_relation(resource_name='certificates'))
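These routes return raw PEM bodies rather than JSON, as set up by the SimpleCert controller above. A hedged fetch sketch (host and port are placeholders):

    import requests

    ca_pem = requests.get(
        'http://keystone:5000/v3/OS-SIMPLE-CERT/ca').text
    signing_pem = requests.get(
        'http://keystone:5000/v3/OS-SIMPLE-CERT/certificates').text
    # Both responses carry Content-Type: application/x-pem-file.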
diff --git a/keystone-moon/keystone/contrib/user_crud/__init__.py b/keystone-moon/keystone/contrib/user_crud/__init__.py
new file mode 100644 (file)
index 0000000..271ceee
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.contrib.user_crud.core import *  # noqa
diff --git a/keystone-moon/keystone/contrib/user_crud/core.py b/keystone-moon/keystone/contrib/user_crud/core.py
new file mode 100644 (file)
index 0000000..dd16d3a
--- /dev/null
@@ -0,0 +1,134 @@
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from oslo_log import log
+
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import wsgi
+from keystone import exception
+from keystone import identity
+from keystone.models import token_model
+
+
+LOG = log.getLogger(__name__)
+
+
+extension.register_public_extension(
+    'OS-KSCRUD', {
+        'name': 'OpenStack Keystone User CRUD',
+        'namespace': 'http://docs.openstack.org/identity/api/ext/'
+                     'OS-KSCRUD/v1.0',
+        'alias': 'OS-KSCRUD',
+        'updated': '2013-07-07T12:00:00-00:00',
+        'description': 'OpenStack extensions to Keystone v2.0 API '
+                       'enabling User Operations.',
+        'links': [
+            {
+                'rel': 'describedby',
+                # TODO(ayoung): needs a description
+                'type': 'text/html',
+                'href': 'https://github.com/openstack/identity-api',
+            }
+        ]})
+
+
+@dependency.requires('catalog_api', 'identity_api', 'resource_api',
+                     'token_provider_api')
+class UserController(identity.controllers.User):
+    def set_user_password(self, context, user_id, user):
+        token_id = context.get('token_id')
+        original_password = user.get('original_password')
+
+        token_data = self.token_provider_api.validate_token(token_id)
+        token_ref = token_model.KeystoneToken(token_id=token_id,
+                                              token_data=token_data)
+
+        if token_ref.user_id != user_id:
+            raise exception.Forbidden('Token belongs to another user')
+        if original_password is None:
+            raise exception.ValidationError(target='user',
+                                            attribute='original password')
+
+        try:
+            user_ref = self.identity_api.authenticate(
+                context,
+                user_id=token_ref.user_id,
+                password=original_password)
+            if not user_ref.get('enabled', True):
+                # NOTE(dolph): why can't you set a disabled user's password?
+                raise exception.Unauthorized('User is disabled')
+        except AssertionError:
+            raise exception.Unauthorized()
+
+        update_dict = {'password': user['password'], 'id': user_id}
+
+        admin_context = copy.copy(context)
+        admin_context['is_admin'] = True
+        super(UserController, self).set_user_password(admin_context,
+                                                      user_id,
+                                                      update_dict)
+
+        # Issue a new token based upon the original token data. This will
+        # always be a V2.0 token.
+
+        # TODO(morganfainberg): Add a mechanism to issue a new token directly
+        # from a token model so that this code can go away. This is likely
+        # not the norm as most cases do not need to yank apart a token to
+        # issue a new one.
+        new_token_ref = {}
+        metadata_ref = {}
+        roles_ref = None
+
+        new_token_ref['user'] = user_ref
+        if token_ref.bind:
+            new_token_ref['bind'] = token_ref.bind
+        if token_ref.project_id:
+            new_token_ref['tenant'] = self.resource_api.get_project(
+                token_ref.project_id)
+        if token_ref.role_names:
+            roles_ref = [dict(name=value)
+                         for value in token_ref.role_names]
+        if token_ref.role_ids:
+            metadata_ref['roles'] = token_ref.role_ids
+        if token_ref.trust_id:
+            metadata_ref['trust'] = {
+                'id': token_ref.trust_id,
+                'trustee_user_id': token_ref.trustee_user_id}
+        new_token_ref['metadata'] = metadata_ref
+        new_token_ref['id'] = uuid.uuid4().hex
+
+        catalog_ref = self.catalog_api.get_catalog(user_id,
+                                                   token_ref.project_id)
+
+        new_token_id, new_token_data = self.token_provider_api.issue_v2_token(
+            token_ref=new_token_ref, roles_ref=roles_ref,
+            catalog_ref=catalog_ref)
+        LOG.debug('TOKEN_REF %s', new_token_data)
+        return new_token_data
+
+
+class CrudExtension(wsgi.ExtensionRouter):
+    """Provides a subset of CRUD operations for internal data types."""
+
+    def add_routes(self, mapper):
+        user_controller = UserController()
+
+        mapper.connect('/OS-KSCRUD/users/{user_id}',
+                       controller=user_controller,
+                       action='set_user_password',
+                       conditions=dict(method=['PATCH']))
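The extension thus lets a user change their own password over the v2.0 pipeline. A hedged client sketch (host, token, and passwords are placeholders; the body shape follows the controller above, which reads 'password' and 'original_password' from the user dict):

    import requests

    resp = requests.patch(
        'http://keystone:5000/v2.0/OS-KSCRUD/users/%s' % user_id,
        headers={'X-Auth-Token': user_token},
        json={'user': {'original_password': 'old-secret',
                       'password': 'new-secret'}})
    # On success, the body contains a freshly issued v2.0 token built from
    # the original token's data (see set_user_password above).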
diff --git a/keystone-moon/keystone/controllers.py b/keystone-moon/keystone/controllers.py
new file mode 100644 (file)
index 0000000..12f13c7
--- /dev/null
@@ -0,0 +1,218 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+from oslo_serialization import jsonutils
+import webob
+
+from keystone.common import extension
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone import exception
+
+
+LOG = log.getLogger(__name__)
+
+MEDIA_TYPE_JSON = 'application/vnd.openstack.identity-%s+json'
+
+_VERSIONS = []
+
+# NOTE(blk-u): latest_app will be set by keystone.service.loadapp(). It gets
+# set to the application that was just loaded. In the case of keystone-all,
+# loadapp() gets called twice, once for the public app and once for the admin
+# app. In the case of httpd/keystone, loadapp() gets called once for the public
+# app if this is the public instance or loadapp() gets called for the admin app
+# if it's the admin instance.
+# This is used to fetch the /v3 JSON Home response. The /v3 JSON Home response
+# is the same whether it's the admin or public service so either admin or
+# public works.
+latest_app = None
+
+
+def request_v3_json_home(new_prefix):
+    if 'v3' not in _VERSIONS:
+        # No V3 support, so return an empty JSON Home document.
+        return {'resources': {}}
+
+    req = webob.Request.blank(
+        '/v3', headers={'Accept': 'application/json-home'})
+    v3_json_home_str = req.get_response(latest_app).body
+    v3_json_home = jsonutils.loads(v3_json_home_str)
+    json_home.translate_urls(v3_json_home, new_prefix)
+
+    return v3_json_home
+
+
+class Extensions(wsgi.Application):
+    """Base extensions controller to be extended by public and admin API's."""
+
+    # extend in subclass to specify the set of extensions
+    @property
+    def extensions(self):
+        return None
+
+    def get_extensions_info(self, context):
+        return {'extensions': {'values': self.extensions.values()}}
+
+    def get_extension_info(self, context, extension_alias):
+        try:
+            return {'extension': self.extensions[extension_alias]}
+        except KeyError:
+            raise exception.NotFound(target=extension_alias)
+
+
+class AdminExtensions(Extensions):
+    @property
+    def extensions(self):
+        return extension.ADMIN_EXTENSIONS
+
+
+class PublicExtensions(Extensions):
+    @property
+    def extensions(self):
+        return extension.PUBLIC_EXTENSIONS
+
+
+def register_version(version):
+    _VERSIONS.append(version)
+
+
+class MimeTypes(object):
+    JSON = 'application/json'
+    JSON_HOME = 'application/json-home'
+
+
+def v3_mime_type_best_match(context):
+
+    # accept_header is a WebOb MIMEAccept object so supports best_match.
+    accept_header = context['accept_header']
+
+    if not accept_header:
+        return MimeTypes.JSON
+
+    SUPPORTED_TYPES = [MimeTypes.JSON, MimeTypes.JSON_HOME]
+    return accept_header.best_match(SUPPORTED_TYPES)
+
+
+class Version(wsgi.Application):
+
+    def __init__(self, version_type, routers=None):
+        self.endpoint_url_type = version_type
+        self._routers = routers
+
+        super(Version, self).__init__()
+
+    def _get_identity_url(self, context, version):
+        """Returns a URL to keystone's own endpoint."""
+        url = self.base_url(context, self.endpoint_url_type)
+        return '%s/%s/' % (url, version)
+
+    def _get_versions_list(self, context):
+        """The list of versions is dependent on the context."""
+        versions = {}
+        if 'v2.0' in _VERSIONS:
+            versions['v2.0'] = {
+                'id': 'v2.0',
+                'status': 'stable',
+                'updated': '2014-04-17T00:00:00Z',
+                'links': [
+                    {
+                        'rel': 'self',
+                        'href': self._get_identity_url(context, 'v2.0'),
+                    }, {
+                        'rel': 'describedby',
+                        'type': 'text/html',
+                        'href': 'http://docs.openstack.org/'
+                    }
+                ],
+                'media-types': [
+                    {
+                        'base': 'application/json',
+                        'type': MEDIA_TYPE_JSON % 'v2.0'
+                    }
+                ]
+            }
+
+        if 'v3' in _VERSIONS:
+            versions['v3'] = {
+                'id': 'v3.0',
+                'status': 'stable',
+                'updated': '2013-03-06T00:00:00Z',
+                'links': [
+                    {
+                        'rel': 'self',
+                        'href': self._get_identity_url(context, 'v3'),
+                    }
+                ],
+                'media-types': [
+                    {
+                        'base': 'application/json',
+                        'type': MEDIA_TYPE_JSON % 'v3'
+                    }
+                ]
+            }
+
+        return versions
+
+    def get_versions(self, context):
+
+        req_mime_type = v3_mime_type_best_match(context)
+        if req_mime_type == MimeTypes.JSON_HOME:
+            v3_json_home = request_v3_json_home('/v3')
+            return wsgi.render_response(
+                body=v3_json_home,
+                headers=(('Content-Type', MimeTypes.JSON_HOME),))
+
+        versions = self._get_versions_list(context)
+        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
+            'versions': {
+                'values': versions.values()
+            }
+        })
+
+    def get_version_v2(self, context):
+        versions = self._get_versions_list(context)
+        if 'v2.0' in _VERSIONS:
+            return wsgi.render_response(body={
+                'version': versions['v2.0']
+            })
+        else:
+            raise exception.VersionNotFound(version='v2.0')
+
+    def _get_json_home_v3(self):
+
+        def all_resources():
+            for router in self._routers:
+                for resource in router.v3_resources:
+                    yield resource
+
+        return {
+            'resources': dict(all_resources())
+        }
+
+    def get_version_v3(self, context):
+        versions = self._get_versions_list(context)
+        if 'v3' in _VERSIONS:
+            req_mime_type = v3_mime_type_best_match(context)
+
+            if req_mime_type == MimeTypes.JSON_HOME:
+                return wsgi.render_response(
+                    body=self._get_json_home_v3(),
+                    headers=(('Content-Type', MimeTypes.JSON_HOME),))
+
+            return wsgi.render_response(body={
+                'version': versions['v3']
+            })
+        else:
+            raise exception.VersionNotFound(version='v3')
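Version discovery is content-negotiated: a plain request to the root path returns 300 Multiple Choices with the registered version descriptors, while Accept: application/json-home against /v3 returns the JSON Home document assembled from every router's v3_resources. A hedged sketch (host and port are placeholders):

    import requests

    # 300 Multiple Choices listing v2.0 and/or v3, per _get_versions_list()
    versions = requests.get('http://keystone:5000/').json()

    # JSON Home document enumerating all registered v3 resources
    home = requests.get(
        'http://keystone:5000/v3',
        headers={'Accept': 'application/json-home'}).json()
    print(sorted(home['resources']))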
diff --git a/keystone-moon/keystone/credential/__init__.py b/keystone-moon/keystone/credential/__init__.py
new file mode 100644 (file)
index 0000000..fc7b631
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.credential import controllers  # noqa
+from keystone.credential.core import *  # noqa
+from keystone.credential import routers  # noqa
diff --git a/keystone-moon/keystone/credential/backends/__init__.py b/keystone-moon/keystone/credential/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/credential/backends/sql.py b/keystone-moon/keystone/credential/backends/sql.py
new file mode 100644 (file)
index 0000000..12daed3
--- /dev/null
@@ -0,0 +1,104 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import sql
+from keystone import credential
+from keystone import exception
+
+
+class CredentialModel(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'credential'
+    attributes = ['id', 'user_id', 'project_id', 'blob', 'type']
+    id = sql.Column(sql.String(64), primary_key=True)
+    user_id = sql.Column(sql.String(64),
+                         nullable=False)
+    project_id = sql.Column(sql.String(64))
+    blob = sql.Column(sql.JsonBlob(), nullable=False)
+    type = sql.Column(sql.String(255), nullable=False)
+    extra = sql.Column(sql.JsonBlob())
+
+
+class Credential(credential.Driver):
+
+    # credential crud
+
+    @sql.handle_conflicts(conflict_type='credential')
+    def create_credential(self, credential_id, credential):
+        session = sql.get_session()
+        with session.begin():
+            ref = CredentialModel.from_dict(credential)
+            session.add(ref)
+        return ref.to_dict()
+
+    @sql.truncated
+    def list_credentials(self, hints):
+        session = sql.get_session()
+        credentials = session.query(CredentialModel)
+        credentials = sql.filter_limit_query(CredentialModel,
+                                             credentials, hints)
+        return [s.to_dict() for s in credentials]
+
+    def list_credentials_for_user(self, user_id):
+        session = sql.get_session()
+        query = session.query(CredentialModel)
+        refs = query.filter_by(user_id=user_id).all()
+        return [ref.to_dict() for ref in refs]
+
+    def _get_credential(self, session, credential_id):
+        ref = session.query(CredentialModel).get(credential_id)
+        if ref is None:
+            raise exception.CredentialNotFound(credential_id=credential_id)
+        return ref
+
+    def get_credential(self, credential_id):
+        session = sql.get_session()
+        return self._get_credential(session, credential_id).to_dict()
+
+    @sql.handle_conflicts(conflict_type='credential')
+    def update_credential(self, credential_id, credential):
+        session = sql.get_session()
+        with session.begin():
+            ref = self._get_credential(session, credential_id)
+            old_dict = ref.to_dict()
+            for k in credential:
+                old_dict[k] = credential[k]
+            new_credential = CredentialModel.from_dict(old_dict)
+            for attr in CredentialModel.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_credential, attr))
+            ref.extra = new_credential.extra
+        return ref.to_dict()
+
+    def delete_credential(self, credential_id):
+        session = sql.get_session()
+
+        with session.begin():
+            ref = self._get_credential(session, credential_id)
+            session.delete(ref)
+
+    def delete_credentials_for_project(self, project_id):
+        session = sql.get_session()
+
+        with session.begin():
+            query = session.query(CredentialModel)
+            query = query.filter_by(project_id=project_id)
+            query.delete()
+
+    def delete_credentials_for_user(self, user_id):
+        session = sql.get_session()
+
+        with session.begin():
+            query = session.query(CredentialModel)
+            query = query.filter_by(user_id=user_id)
+            query.delete()
diff --git a/keystone-moon/keystone/credential/controllers.py b/keystone-moon/keystone/credential/controllers.py
new file mode 100644 (file)
index 0000000..65c1727
--- /dev/null
@@ -0,0 +1,108 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+
+from oslo_serialization import jsonutils
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import validation
+from keystone.credential import schema
+from keystone import exception
+from keystone.i18n import _
+
+
+@dependency.requires('credential_api')
+class CredentialV3(controller.V3Controller):
+    collection_name = 'credentials'
+    member_name = 'credential'
+
+    def __init__(self):
+        super(CredentialV3, self).__init__()
+        self.get_member_from_driver = self.credential_api.get_credential
+
+    def _assign_unique_id(self, ref, trust_id=None):
+        # Generates and assigns a unique identifier to
+        # a credential reference.
+        if ref.get('type', '').lower() == 'ec2':
+            try:
+                blob = jsonutils.loads(ref.get('blob'))
+            except (ValueError, TypeError):
+                raise exception.ValidationError(
+                    message=_('Invalid blob in credential'))
+            if not blob or not isinstance(blob, dict):
+                raise exception.ValidationError(attribute='blob',
+                                                target='credential')
+            if blob.get('access') is None:
+                raise exception.ValidationError(attribute='access',
+                                                target='blob')
+            ret_ref = ref.copy()
+            ret_ref['id'] = hashlib.sha256(blob['access']).hexdigest()
+            # Update the blob with the trust_id so that credentials created
+            # with a trust-scoped token result in trust-scoped tokens when
+            # authenticating via ec2tokens.
+            if trust_id is not None:
+                blob['trust_id'] = trust_id
+                ret_ref['blob'] = jsonutils.dumps(blob)
+            return ret_ref
+        else:
+            return super(CredentialV3, self)._assign_unique_id(ref)
+
+    @controller.protected()
+    @validation.validated(schema.credential_create, 'credential')
+    def create_credential(self, context, credential):
+        trust_id = self._get_trust_id_for_request(context)
+        ref = self._assign_unique_id(self._normalize_dict(credential),
+                                     trust_id)
+        ref = self.credential_api.create_credential(ref['id'], ref)
+        return CredentialV3.wrap_member(context, ref)
+
+    @staticmethod
+    def _blob_to_json(ref):
+        # credentials stored via ec2tokens before the fix for #1259584
+        # need JSON serializing, as that's the documented API format
+        blob = ref.get('blob')
+        if isinstance(blob, dict):
+            new_ref = ref.copy()
+            new_ref['blob'] = jsonutils.dumps(blob)
+            return new_ref
+        else:
+            return ref
+
+    @controller.filterprotected('user_id')
+    def list_credentials(self, context, filters):
+        hints = CredentialV3.build_driver_hints(context, filters)
+        refs = self.credential_api.list_credentials(hints)
+        ret_refs = [self._blob_to_json(r) for r in refs]
+        return CredentialV3.wrap_collection(context, ret_refs,
+                                            hints=hints)
+
+    @controller.protected()
+    def get_credential(self, context, credential_id):
+        ref = self.credential_api.get_credential(credential_id)
+        ret_ref = self._blob_to_json(ref)
+        return CredentialV3.wrap_member(context, ret_ref)
+
+    @controller.protected()
+    @validation.validated(schema.credential_update, 'credential')
+    def update_credential(self, context, credential_id, credential):
+        self._require_matching_id(credential_id, credential)
+
+        ref = self.credential_api.update_credential(credential_id, credential)
+        return CredentialV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_credential(self, context, credential_id):
+        return self.credential_api.delete_credential(credential_id)
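Note the deterministic ID scheme in _assign_unique_id(): an ec2 credential's ID is the SHA-256 hex digest of its access key, so the same access key always maps to the same credential record. A minimal sketch of the derivation (values hypothetical):

    import hashlib

    from oslo_serialization import jsonutils

    blob = jsonutils.loads('{"access": "ak123", "secret": "sk456"}')
    credential_id = hashlib.sha256(blob['access']).hexdigest()
    # Matches the ID the controller assigns, which lets the ec2tokens flow
    # look the credential up directly from the access key in a request.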
diff --git a/keystone-moon/keystone/credential/core.py b/keystone-moon/keystone/credential/core.py
new file mode 100644 (file)
index 0000000..d3354ea
--- /dev/null
@@ -0,0 +1,140 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Credentials service."""
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone.common import driver_hints
+from keystone.common import manager
+from keystone import exception
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+
+@dependency.provider('credential_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Credential backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.credential.driver)
+
+    @manager.response_truncated
+    def list_credentials(self, hints=None):
+        return self.driver.list_credentials(hints or driver_hints.Hints())
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    # credential crud
+
+    @abc.abstractmethod
+    def create_credential(self, credential_id, credential):
+        """Creates a new credential.
+
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_credentials(self, hints):
+        """List all credentials.
+
+        :param hints: contains the list of filters yet to be satisfied.
+                      Any filters satisfied here will be removed so that
+                      the caller will know if any filters remain.
+
+        :returns: a list of credential_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_credentials_for_user(self, user_id):
+        """List credentials for a user.
+
+        :param user_id: ID of a user to filter credentials by.
+
+        :returns: a list of credential_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_credential(self, credential_id):
+        """Get a credential by ID.
+
+        :returns: credential_ref
+        :raises: keystone.exception.CredentialNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_credential(self, credential_id, credential):
+        """Updates an existing credential.
+
+        :raises: keystone.exception.CredentialNotFound,
+                 keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_credential(self, credential_id):
+        """Deletes an existing credential.
+
+        :raises: keystone.exception.CredentialNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_credentials_for_project(self, project_id):
+        """Deletes all credentials for a project."""
+        self._delete_credentials(lambda cr: cr['project_id'] == project_id)
+
+    @abc.abstractmethod
+    def delete_credentials_for_user(self, user_id):
+        """Deletes all credentials for a user."""
+        self._delete_credentials(lambda cr: cr['user_id'] == user_id)
+
+    def _delete_credentials(self, match_fn):
+        """Do the actual credential deletion work (default implementation).
+
+        :param match_fn: function that takes a credential dict as its
+                         parameter and returns True if the credential
+                         matches, False otherwise.
+        """
+        for cr in self.list_credentials():
+            if match_fn(cr):
+                try:
+                    self.credential_api.delete_credential(cr['id'])
+                except exception.CredentialNotFound:
+                    LOG.debug('Deletion of credential is not required: %s',
+                              cr['id'])
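Callers reach the driver through the injected credential_api manager. A sketch of the list path with driver hints, assuming driver_hints.Hints exposes an add_filter() helper as used elsewhere in Keystone:

    from keystone.common import dependency
    from keystone.common import driver_hints


    @dependency.requires('credential_api')
    class Example(object):
        def user_credentials(self, user_id):
            hints = driver_hints.Hints()
            hints.add_filter('user_id', user_id)
            # @manager.response_truncated caps the result at the configured
            # list_limit; filters the driver satisfies are removed from hints.
            return self.credential_api.list_credentials(hints)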
diff --git a/keystone-moon/keystone/credential/routers.py b/keystone-moon/keystone/credential/routers.py
new file mode 100644 (file)
index 0000000..db3651f
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""WSGI Routers for the Credentials service."""
+
+from keystone.common import router
+from keystone.common import wsgi
+from keystone.credential import controllers
+
+
+class Routers(wsgi.RoutersBase):
+
+    def append_v3_routers(self, mapper, routers):
+        routers.append(
+            router.Router(controllers.CredentialV3(),
+                          'credentials', 'credential',
+                          resource_descriptions=self.v3_resources))
diff --git a/keystone-moon/keystone/credential/schema.py b/keystone-moon/keystone/credential/schema.py
new file mode 100644 (file)
index 0000000..749f0c0
--- /dev/null
@@ -0,0 +1,62 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+_credential_properties = {
+    'blob': {
+        'type': 'string'
+    },
+    'project_id': {
+        'type': 'string'
+    },
+    'type': {
+        'type': 'string'
+    },
+    'user_id': {
+        'type': 'string'
+    }
+}
+
+credential_create = {
+    'type': 'object',
+    'properties': _credential_properties,
+    'additionalProperties': True,
+    'oneOf': [
+        {
+            'title': 'ec2 credential requires project_id',
+            'required': ['blob', 'type', 'user_id', 'project_id'],
+            'properties': {
+                'type': {
+                    'enum': ['ec2']
+                }
+            }
+        },
+        {
+            'title': 'non-ec2 credential does not require project_id',
+            'required': ['blob', 'type', 'user_id'],
+            'properties': {
+                'type': {
+                    'not': {
+                        'enum': ['ec2']
+                    }
+                }
+            }
+        }
+    ]
+}
+
+credential_update = {
+    'type': 'object',
+    'properties': _credential_properties,
+    'minProperties': 1,
+    'additionalProperties': True
+}
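The oneOf branches make project_id mandatory for ec2 credentials only. A quick check with the jsonschema library (assumed to be the validator behind keystone.common.validation):

    import jsonschema

    from keystone.credential import schema

    ec2_ref = {'blob': '{"access": "a", "secret": "s"}',
               'type': 'ec2', 'user_id': 'u1'}
    # jsonschema.validate(ec2_ref, schema.credential_create)  # would fail:
    # the ec2 branch requires project_id
    ec2_ref['project_id'] = 'p1'
    jsonschema.validate(ec2_ref, schema.credential_create)    # passes

    cert_ref = {'blob': 'x', 'type': 'cert', 'user_id': 'u1'}
    jsonschema.validate(cert_ref, schema.credential_create)   # non-ec2: no
                                                              # project_id needed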
diff --git a/keystone-moon/keystone/exception.py b/keystone-moon/keystone/exception.py
new file mode 100644 (file)
index 0000000..6749fdc
--- /dev/null
@@ -0,0 +1,469 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import encodeutils
+import six
+
+from keystone.i18n import _, _LW
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+# Tests use this to make exception message format errors fatal
+_FATAL_EXCEPTION_FORMAT_ERRORS = False
+
+
+class Error(Exception):
+    """Base error class.
+
+    Child classes should define an HTTP status code, title, and a
+    message_format.
+
+    """
+    code = None
+    title = None
+    message_format = None
+
+    def __init__(self, message=None, **kwargs):
+        try:
+            message = self._build_message(message, **kwargs)
+        except KeyError:
+            # if you see this warning in your logs, please raise a bug report
+            if _FATAL_EXCEPTION_FORMAT_ERRORS:
+                raise
+            else:
+                LOG.warning(_LW('missing exception kwargs (programmer error)'))
+                message = self.message_format
+
+        super(Error, self).__init__(message)
+
+    def _build_message(self, message, **kwargs):
+        """Builds and returns an exception message.
+
+        :raises: KeyError given insufficient kwargs
+
+        """
+        if not message:
+            try:
+                message = self.message_format % kwargs
+            except UnicodeDecodeError:
+                try:
+                    kwargs = {k: encodeutils.safe_decode(v)
+                              for k, v in six.iteritems(kwargs)}
+                except UnicodeDecodeError:
+                    # NOTE(jamielennox): This is the complete failure case;
+                    # at least by showing the template we have some idea of
+                    # where the error is coming from.
+                    message = self.message_format
+                else:
+                    message = self.message_format % kwargs
+
+        return message
+
+
+class ValidationError(Error):
+    message_format = _("Expecting to find %(attribute)s in %(target)s -"
+                       " the server could not comply with the request"
+                       " since it is either malformed or otherwise"
+                       " incorrect. The client is assumed to be in error.")
+    code = 400
+    title = 'Bad Request'
+
+
+class SchemaValidationError(ValidationError):
+    # NOTE(lbragstad): For whole OpenStack message consistency, this error
+    # message has been written in a format consistent with WSME.
+    message_format = _("%(detail)s")
+
+
+class ValidationTimeStampError(Error):
+    message_format = _("Timestamp not in expected format."
+                       " The server could not comply with the request"
+                       " since it is either malformed or otherwise"
+                       " incorrect. The client is assumed to be in error.")
+    code = 400
+    title = 'Bad Request'
+
+
+class StringLengthExceeded(ValidationError):
+    message_format = _("String length exceeded.The length of"
+                       " string '%(string)s' exceeded the limit"
+                       " of column %(type)s(CHAR(%(length)d)).")
+
+
+class ValidationSizeError(Error):
+    message_format = _("Request attribute %(attribute)s must be"
+                       " less than or equal to %(size)i. The server"
+                       " could not comply with the request because"
+                       " the attribute size is invalid (too large)."
+                       " The client is assumed to be in error.")
+    code = 400
+    title = 'Bad Request'
+
+
+class CircularRegionHierarchyError(Error):
+    message_format = _("The specified parent region %(parent_region_id)s "
+                       "would create a circular region hierarchy.")
+    code = 400
+    title = 'Bad Request'
+
+
+class PasswordVerificationError(Error):
+    message_format = _("The password length must be less than or equal "
+                       "to %(size)i. The server could not comply with the "
+                       "request because the password is invalid.")
+    code = 403
+    title = 'Forbidden'
+
+
+class RegionDeletionError(Error):
+    message_format = _("Unable to delete region %(region_id)s because it or "
+                       "its child regions have associated endpoints.")
+    code = 403
+    title = 'Forbidden'
+
+
+class PKITokenExpected(Error):
+    message_format = _('The certificates you requested are not available. '
+                       'It is likely that this server does not use PKI '
+                       'tokens; otherwise, this is the result of '
+                       'misconfiguration.')
+    code = 403
+    title = 'Cannot retrieve certificates'
+
+
+class SecurityError(Error):
+    """Avoids exposing details of security failures, unless in debug mode."""
+    amendment = _('(Disable debug mode to suppress these details.)')
+
+    def _build_message(self, message, **kwargs):
+        """Only returns detailed messages in debug mode."""
+        if CONF.debug:
+            return _('%(message)s %(amendment)s') % {
+                'message': message or self.message_format % kwargs,
+                'amendment': self.amendment}
+        else:
+            return self.message_format % kwargs
+
+
+class Unauthorized(SecurityError):
+    message_format = _("The request you have made requires authentication.")
+    code = 401
+    title = 'Unauthorized'
+
+
+class AuthPluginException(Unauthorized):
+    message_format = _("Authentication plugin error.")
+
+    def __init__(self, *args, **kwargs):
+        super(AuthPluginException, self).__init__(*args, **kwargs)
+        self.authentication = {}
+
+
+class MissingGroups(Unauthorized):
+    message_format = _("Unable to find valid groups while using "
+                       "mapping %(mapping_id)s")
+
+
+class AuthMethodNotSupported(AuthPluginException):
+    message_format = _("Attempted to authenticate with an unsupported method.")
+
+    def __init__(self, *args, **kwargs):
+        super(AuthMethodNotSupported, self).__init__(*args, **kwargs)
+        self.authentication = {'methods': CONF.auth.methods}
+
+
+class AdditionalAuthRequired(AuthPluginException):
+    message_format = _("Additional authentications steps required.")
+
+    def __init__(self, auth_response=None, **kwargs):
+        super(AdditionalAuthRequired, self).__init__(message=None, **kwargs)
+        self.authentication = auth_response
+
+
+class Forbidden(SecurityError):
+    message_format = _("You are not authorized to perform the"
+                       " requested action.")
+    code = 403
+    title = 'Forbidden'
+
+
+class ForbiddenAction(Forbidden):
+    message_format = _("You are not authorized to perform the"
+                       " requested action: %(action)s")
+
+
+class ImmutableAttributeError(Forbidden):
+    message_format = _("Could not change immutable attribute(s) "
+                       "'%(attributes)s' in target %(target)s")
+
+
+class CrossBackendNotAllowed(Forbidden):
+    message_format = _("Group membership across backend boundaries is not "
+                       "allowed, group in question is %(group_id)s, "
+                       "user is %(user_id)s")
+
+
+class InvalidPolicyAssociation(Forbidden):
+    message_format = _("Invalid mix of entities for policy association - "
+                       "only Endpoint, Service or Region+Service allowed. "
+                       "Request was - Endpoint: %(endpoint_id)s, "
+                       "Service: %(service_id)s, Region: %(region_id)s")
+
+
+class InvalidDomainConfig(Forbidden):
+    message_format = _("Invalid domain specific configuration: %(reason)s")
+
+
+class NotFound(Error):
+    message_format = _("Could not find: %(target)s")
+    code = 404
+    title = 'Not Found'
+
+
+class EndpointNotFound(NotFound):
+    message_format = _("Could not find endpoint: %(endpoint_id)s")
+
+
+class MetadataNotFound(NotFound):
+    """(dolph): metadata is not a user-facing concept,
+    so this exception should not be exposed
+    """
+    message_format = _("An unhandled exception has occurred:"
+                       " Could not find metadata.")
+
+
+class PolicyNotFound(NotFound):
+    message_format = _("Could not find policy: %(policy_id)s")
+
+
+class PolicyAssociationNotFound(NotFound):
+    message_format = _("Could not find policy association")
+
+
+class RoleNotFound(NotFound):
+    message_format = _("Could not find role: %(role_id)s")
+
+
+class RoleAssignmentNotFound(NotFound):
+    message_format = _("Could not find role assignment with role: "
+                       "%(role_id)s, user or group: %(actor_id)s, "
+                       "project or domain: %(target_id)s")
+
+
+class RegionNotFound(NotFound):
+    message_format = _("Could not find region: %(region_id)s")
+
+
+class ServiceNotFound(NotFound):
+    message_format = _("Could not find service: %(service_id)s")
+
+
+class DomainNotFound(NotFound):
+    message_format = _("Could not find domain: %(domain_id)s")
+
+
+class ProjectNotFound(NotFound):
+    message_format = _("Could not find project: %(project_id)s")
+
+
+class InvalidParentProject(NotFound):
+    message_format = _("Cannot create project with parent: %(project_id)s")
+
+
+class TokenNotFound(NotFound):
+    message_format = _("Could not find token: %(token_id)s")
+
+
+class UserNotFound(NotFound):
+    message_format = _("Could not find user: %(user_id)s")
+
+
+class GroupNotFound(NotFound):
+    message_format = _("Could not find group: %(group_id)s")
+
+
+class MappingNotFound(NotFound):
+    message_format = _("Could not find mapping: %(mapping_id)s")
+
+
+class TrustNotFound(NotFound):
+    message_format = _("Could not find trust: %(trust_id)s")
+
+
+class TrustUseLimitReached(Forbidden):
+    message_format = _("No remaining uses for trust: %(trust_id)s")
+
+
+class CredentialNotFound(NotFound):
+    message_format = _("Could not find credential: %(credential_id)s")
+
+
+class VersionNotFound(NotFound):
+    message_format = _("Could not find version: %(version)s")
+
+
+class EndpointGroupNotFound(NotFound):
+    message_format = _("Could not find Endpoint Group: %(endpoint_group_id)s")
+
+
+class IdentityProviderNotFound(NotFound):
+    message_format = _("Could not find Identity Provider: %(idp_id)s")
+
+
+class ServiceProviderNotFound(NotFound):
+    message_format = _("Could not find Service Provider: %(sp_id)s")
+
+
+class FederatedProtocolNotFound(NotFound):
+    message_format = _("Could not find federated protocol %(protocol_id)s for"
+                       " Identity Provider: %(idp_id)s")
+
+
+class PublicIDNotFound(NotFound):
+    # This is used internally and mapped to either User/GroupNotFound or
+    # Assertion before the exception leaves Keystone.
+    message_format = "%(id)s"
+
+
+class DomainConfigNotFound(NotFound):
+    message_format = _('Could not find %(group_or_option)s in domain '
+                       'configuration for domain %(domain_id)s')
+
+
+class Conflict(Error):
+    message_format = _("Conflict occurred attempting to store %(type)s -"
+                       " %(details)s")
+    code = 409
+    title = 'Conflict'
+
+
+class UnexpectedError(SecurityError):
+    """Avoids exposing details of failures, unless in debug mode."""
+    _message_format = _("An unexpected error prevented the server "
+                        "from fulfilling your request.")
+
+    debug_message_format = _("An unexpected error prevented the server "
+                             "from fulfilling your request: %(exception)s")
+
+    @property
+    def message_format(self):
+        """Return the generic message format string unless debug is enabled."""
+        if CONF.debug:
+            return self.debug_message_format
+        return self._message_format
+
+    def _build_message(self, message, **kwargs):
+        if CONF.debug and 'exception' not in kwargs:
+            # Be extra defensive: give 'exception' a value so that the
+            # message substitution itself cannot raise when the caller
+            # omits it.
+            kwargs['exception'] = ''
+        return super(UnexpectedError, self)._build_message(message, **kwargs)
+
+    code = 500
+    title = 'Internal Server Error'
+
+
+class TrustConsumeMaximumAttempt(UnexpectedError):
+    debug_message_format = _("Unable to consume trust %(trust_id)s, unable to "
+                             "acquire lock.")
+
+
+class CertificateFilesUnavailable(UnexpectedError):
+    debug_message_format = _("Expected signing certificates are not available "
+                             "on the server. Please check Keystone "
+                             "configuration.")
+
+
+class MalformedEndpoint(UnexpectedError):
+    debug_message_format = _("Malformed endpoint URL (%(endpoint)s),"
+                             " see ERROR log for details.")
+
+
+class MappedGroupNotFound(UnexpectedError):
+    debug_message_format = _("Group %(group_id)s returned by mapping "
+                             "%(mapping_id)s was not found in the backend.")
+
+
+class MetadataFileError(UnexpectedError):
+    message_format = _("Error while reading metadata file, %(reason)s")
+
+
+class AssignmentTypeCalculationError(UnexpectedError):
+    message_format = _(
+        'Unexpected combination of grant attributes - '
+        'User: %(user_id)s, Group: %(group_id)s, Project: %(project_id)s, '
+        'Domain: %(domain_id)s')
+
+
+class NotImplemented(Error):
+    message_format = _("The action you have requested has not"
+                       " been implemented.")
+    code = 501
+    title = 'Not Implemented'
+
+
+class Gone(Error):
+    message_format = _("The service you have requested is no"
+                       " longer available on this server.")
+    code = 410
+    title = 'Gone'
+
+
+class ConfigFileNotFound(UnexpectedError):
+    debug_message_format = _("The Keystone configuration file %(config_file)s "
+                             "could not be found.")
+
+
+class KeysNotFound(UnexpectedError):
+    message_format = _('No encryption keys found; run keystone-manage '
+                       'fernet_setup to bootstrap one.')
+
+
+class MultipleSQLDriversInConfig(UnexpectedError):
+    message_format = _('The Keystone domain-specific configuration has '
+                       'specified more than one SQL driver (only one is '
+                       'permitted): %(source)s.')
+
+
+class MigrationNotProvided(Exception):
+    def __init__(self, mod_name, path):
+        super(MigrationNotProvided, self).__init__(_(
+            "%(mod_name)s doesn't provide database migrations. The migration"
+            " repository path at %(path)s doesn't exist or isn't a directory."
+        ) % {'mod_name': mod_name, 'path': path})
+
+
+class UnsupportedTokenVersionException(Exception):
+    """Token version is unrecognizable or unsupported."""
+    pass
+
+
+class SAMLSigningError(UnexpectedError):
+    debug_message_format = _('Unable to sign SAML assertion. It is likely '
+                             'that this server does not have xmlsec1 '
+                             'installed, or this is the result of '
+                             'misconfiguration. Reason %(reason)s')
+    title = 'Error signing SAML assertion'
+
+
+class OAuthHeadersMissingError(UnexpectedError):
+    debug_message_format = _('No Authorization headers found, cannot proceed '
+                             'with OAuth related calls, if running under '
+                             'HTTPd or Apache, ensure WSGIPassAuthorization '
+                             'is set to On.')
+    title = 'Error retrieving OAuth headers'
diff --git a/keystone-moon/keystone/hacking/__init__.py b/keystone-moon/keystone/hacking/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/hacking/checks.py b/keystone-moon/keystone/hacking/checks.py
new file mode 100644 (file)
index 0000000..5d715d9
--- /dev/null
@@ -0,0 +1,446 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone's pep8 extensions.
+
+In order to make the review process faster and easier for core devs we are
+adding some Keystone-specific pep8 checks. These will catch common errors
+so that core devs don't have to.
+
+There are two types of pep8 extensions. One is a function that takes either
+a physical or logical line. The physical or logical line is the first param
+in the function definition and can be followed by other parameters supported
+by pep8. The second type is a class that parses AST trees. For more info
+please see pep8.py.
+"""
+
+import ast
+import re
+
+import six
+
+
+class BaseASTChecker(ast.NodeVisitor):
+    """Provides a simple framework for writing AST-based checks.
+
+    Subclasses should implement visit_* methods like any other AST visitor
+    implementation. When they detect an error for a particular node the
+    method should call ``self.add_error(offending_node)``. Details about
+    where in the code the error occurred will be pulled from the node
+    object.
+
+    Subclasses should also provide a class variable named CHECK_DESC to
+    be used for the human readable error message.
+
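+    A minimal subclass (an illustrative sketch; K999 is a made-up check
+    code) might look like::
+
+        class CheckForEval(BaseASTChecker):
+            CHECK_DESC = 'K999 Do not use eval()'
+
+            def visit_Call(self, node):
+                if (isinstance(node.func, ast.Name)
+                        and node.func.id == 'eval'):
+                    self.add_error(node)
+                self.generic_visit(node)
+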
+    """
+
+    def __init__(self, tree, filename):
+        """This object is created automatically by pep8.
+
+        :param tree: an AST tree
+        :param filename: name of the file being analyzed
+                         (ignored by our checks)
+        """
+        self._tree = tree
+        self._errors = []
+
+    def run(self):
+        """Called automatically by pep8."""
+        self.visit(self._tree)
+        return self._errors
+
+    def add_error(self, node, message=None):
+        """Add an error caused by a node to the list of errors for pep8."""
+        message = message or self.CHECK_DESC
+        error = (node.lineno, node.col_offset, message, self.__class__)
+        self._errors.append(error)
+
+
+class CheckForMutableDefaultArgs(BaseASTChecker):
+    """Checks for the use of mutable objects as function/method defaults.
+
+    We are only checking for list and dict literals at this time. This means
+    that a developer could specify an instance of their own and cause a bug.
+    The fix for this is probably more work than it's worth because it will
+    get caught during code review.
+
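+    Illustrative examples:
+
+    Okay: def f(x=None):
+    K001: def f(x=[]):
+    K001: def f(x=dict()):
+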
+    """
+
+    CHECK_DESC = 'K001 Using mutable as a function/method default'
+    MUTABLES = (
+        ast.List, ast.ListComp,
+        ast.Dict, ast.DictComp,
+        ast.Set, ast.SetComp,
+        ast.Call)
+
+    def visit_FunctionDef(self, node):
+        for arg in node.args.defaults:
+            if isinstance(arg, self.MUTABLES):
+                self.add_error(arg)
+
+        super(CheckForMutableDefaultArgs, self).generic_visit(node)
+
+
+def block_comments_begin_with_a_space(physical_line, line_number):
+    """There should be a space after the # of block comments.
+
+    There is already a check in pep8 that enforces this rule for
+    inline comments.
+
+    Okay: # this is a comment
+    Okay: #!/usr/bin/python
+    Okay: #  this is a comment
+    K002: #this is a comment
+
+    """
+    MESSAGE = "K002 block comments should start with '# '"
+
+    # shebangs are OK
+    if line_number == 1 and physical_line.startswith('#!'):
+        return
+
+    text = physical_line.strip()
+    if text.startswith('#'):  # look for block comments
+        if len(text) > 1 and not text[1].isspace():
+            return physical_line.index('#'), MESSAGE
+
+
+class CheckForAssertingNoneEquality(BaseASTChecker):
+    """Ensures that code does not use a None with assert(Not*)Equal."""
+
+    CHECK_DESC_IS = ('K003 Use self.assertIsNone(...) when comparing '
+                     'against None')
+    CHECK_DESC_ISNOT = ('K004 Use self.assertIsNotNone(...) when comparing '
+                        'against None')
+
+    def visit_Call(self, node):
+        # NOTE(dstanek): I wrote this in a verbose way to make it easier to
+        # read for those that have little experience with Python's AST.
+
+        if isinstance(node.func, ast.Attribute):
+            if node.func.attr == 'assertEqual':
+                for arg in node.args:
+                    if isinstance(arg, ast.Name) and arg.id == 'None':
+                        self.add_error(node, message=self.CHECK_DESC_IS)
+            elif node.func.attr == 'assertNotEqual':
+                for arg in node.args:
+                    if isinstance(arg, ast.Name) and arg.id == 'None':
+                        self.add_error(node, message=self.CHECK_DESC_ISNOT)
+
+        super(CheckForAssertingNoneEquality, self).generic_visit(node)
+
+
+class CheckForLoggingIssues(BaseASTChecker):
+
+    DEBUG_CHECK_DESC = 'K005 Using translated string in debug logging'
+    NONDEBUG_CHECK_DESC = 'K006 Not using translating helper for logging'
+    EXCESS_HELPER_CHECK_DESC = 'K007 Using hints when _ is necessary'
+    LOG_MODULES = ('logging', 'keystone.openstack.common.log')
+    I18N_MODULES = (
+        'keystone.i18n._',
+        'keystone.i18n._LI',
+        'keystone.i18n._LW',
+        'keystone.i18n._LE',
+        'keystone.i18n._LC',
+    )
+    TRANS_HELPER_MAP = {
+        'debug': None,
+        'info': '_LI',
+        'warn': '_LW',
+        'warning': '_LW',
+        'error': '_LE',
+        'exception': '_LE',
+        'critical': '_LC',
+    }
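+
+    # Illustrative examples (comments only, never executed) of what the
+    # checks below flag:
+    #
+    #   LOG.debug(_('...'))      # K005: translated string in debug logging
+    #   LOG.warning(_('...'))    # K006: should use the _LW helper instead
+    #   msg = _LW('...')
+    #   LOG.warning(msg)
+    #   raise ValueError(msg)    # K007: msg is also raised, so _ is needed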
+
+    def __init__(self, tree, filename):
+        super(CheckForLoggingIssues, self).__init__(tree, filename)
+
+        self.logger_names = []
+        self.logger_module_names = []
+        self.i18n_names = {}
+
+        # NOTE(dstanek): this kinda accounts for scopes when talking
+        # about only leaf node in the graph
+        self.assignments = {}
+
+    def generic_visit(self, node):
+        """Called if no explicit visitor function exists for a node."""
+        for field, value in ast.iter_fields(node):
+            if isinstance(value, list):
+                for item in value:
+                    if isinstance(item, ast.AST):
+                        item._parent = node
+                        self.visit(item)
+            elif isinstance(value, ast.AST):
+                value._parent = node
+                self.visit(value)
+
+    def _filter_imports(self, module_name, alias):
+        """Keeps lists of logging and i18n imports
+
+        """
+        if module_name in self.LOG_MODULES:
+            self.logger_module_names.append(alias.asname or alias.name)
+        elif module_name in self.I18N_MODULES:
+            self.i18n_names[alias.asname or alias.name] = alias.name
+
+    def visit_Import(self, node):
+        for alias in node.names:
+            self._filter_imports(alias.name, alias)
+        return super(CheckForLoggingIssues, self).generic_visit(node)
+
+    def visit_ImportFrom(self, node):
+        for alias in node.names:
+            full_name = '%s.%s' % (node.module, alias.name)
+            self._filter_imports(full_name, alias)
+        return super(CheckForLoggingIssues, self).generic_visit(node)
+
+    def _find_name(self, node):
+        """Return the fully qualified name or a Name or Attribute."""
+        if isinstance(node, ast.Name):
+            return node.id
+        elif (isinstance(node, ast.Attribute)
+                and isinstance(node.value, (ast.Name, ast.Attribute))):
+            method_name = node.attr
+            obj_name = self._find_name(node.value)
+            if obj_name is None:
+                return None
+            return obj_name + '.' + method_name
+        elif isinstance(node, six.string_types):
+            return node
+        else:  # could be Subscript, Call or many more
+            return None
+
+    def visit_Assign(self, node):
+        """Look for 'LOG = logging.getLogger'
+
+        This handles the simple case:
+          name = [logging_module].getLogger(...)
+
+          - or -
+
+          name = [i18n_name](...)
+
+        And some more complex ones:
+          name = [i18n_name](...) % X
+
+          - or -
+
+          self.name = [i18n_name](...) % X
+
+        """
+        attr_node_types = (ast.Name, ast.Attribute)
+
+        if (len(node.targets) != 1
+                or not isinstance(node.targets[0], attr_node_types)):
+            # say no to: "x, y = ..."
+            return super(CheckForLoggingIssues, self).generic_visit(node)
+
+        target_name = self._find_name(node.targets[0])
+
+        if (isinstance(node.value, ast.BinOp) and
+                isinstance(node.value.op, ast.Mod)):
+            if (isinstance(node.value.left, ast.Call) and
+                    isinstance(node.value.left.func, ast.Name) and
+                    node.value.left.func.id in self.i18n_names):
+                # NOTE(dstanek): this is done to match cases like:
+                # `msg = _('something %s') % x`
+                node = ast.Assign(value=node.value.left)
+
+        if not isinstance(node.value, ast.Call):
+            # node.value must be a call to getLogger
+            self.assignments.pop(target_name, None)
+            return super(CheckForLoggingIssues, self).generic_visit(node)
+
+        # is this a call to an i18n function?
+        if (isinstance(node.value.func, ast.Name)
+                and node.value.func.id in self.i18n_names):
+            self.assignments[target_name] = node.value.func.id
+            return super(CheckForLoggingIssues, self).generic_visit(node)
+
+        if (not isinstance(node.value.func, ast.Attribute)
+                or not isinstance(node.value.func.value, attr_node_types)):
+            # function must be an attribute on an object like
+            # logging.getLogger
+            return super(CheckForLoggingIssues, self).generic_visit(node)
+
+        object_name = self._find_name(node.value.func.value)
+        func_name = node.value.func.attr
+
+        if (object_name in self.logger_module_names
+                and func_name == 'getLogger'):
+            self.logger_names.append(target_name)
+
+        return super(CheckForLoggingIssues, self).generic_visit(node)
+
+    def visit_Call(self, node):
+        """Look for the 'LOG.*' calls.
+
+        """
+
+        # obj.method
+        if isinstance(node.func, ast.Attribute):
+            if isinstance(node.func.value, (ast.Name, ast.Attribute)):
+                obj_name = self._find_name(node.func.value)
+                method_name = node.func.attr
+            else:  # could be Subscript, Call or many more
+                return super(CheckForLoggingIssues, self).generic_visit(node)
+
+            # must be a logger instance and one of the supported logging
+            # methods
+            if (obj_name not in self.logger_names
+                    or method_name not in self.TRANS_HELPER_MAP):
+                return super(CheckForLoggingIssues, self).generic_visit(node)
+
+            # the call must have arguments
+            if not len(node.args):
+                return super(CheckForLoggingIssues, self).generic_visit(node)
+
+            if method_name == 'debug':
+                self._process_debug(node)
+            elif method_name in self.TRANS_HELPER_MAP:
+                self._process_non_debug(node, method_name)
+
+        return super(CheckForLoggingIssues, self).generic_visit(node)
+
+    def _process_debug(self, node):
+        msg = node.args[0]  # first arg to a logging method is the msg
+
+        # if first arg is a call to a i18n name
+        if (isinstance(msg, ast.Call)
+                and isinstance(msg.func, ast.Name)
+                and msg.func.id in self.i18n_names):
+            self.add_error(msg, message=self.DEBUG_CHECK_DESC)
+
+        # if the first arg is a reference to a i18n call
+        elif (isinstance(msg, ast.Name)
+                and msg.id in self.assignments
+                and not self._is_raised_later(node, msg.id)):
+            self.add_error(msg, message=self.DEBUG_CHECK_DESC)
+
+    def _process_non_debug(self, node, method_name):
+        msg = node.args[0]  # first arg to a logging method is the msg
+
+        # if first arg is a call to a i18n name
+        if isinstance(msg, ast.Call):
+            try:
+                func_name = msg.func.id
+            except AttributeError:
+                # in the case of logging only an exception, the msg function
+                # will not have an id associated with it, for instance:
+                # LOG.warning(six.text_type(e))
+                return
+
+            # the function name is the correct translation helper
+            # for the logging method
+            if func_name == self.TRANS_HELPER_MAP[method_name]:
+                return
+
+            # the function name is an alias for the correct translation
+            # helper for the logging method
+            if (self.i18n_names[func_name] ==
+                    self.TRANS_HELPER_MAP[method_name]):
+                return
+
+            self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
+
+        # if the first arg is not a reference to the correct i18n hint
+        elif isinstance(msg, ast.Name):
+
+            # FIXME(dstanek): to make this more robust we should be checking
+            # all names passed into a logging method. we can't right now
+            # because:
+            # 1. We have code like this that we'll fix when dealing with the %:
+            #       msg = _('....') % {}
+            #       LOG.warn(msg)
+            # 2. We also do LOG.exception(e) in several places. I'm not sure
+            #    exactly what we should be doing about that.
+            if msg.id not in self.assignments:
+                return
+
+            helper_method_name = self.TRANS_HELPER_MAP[method_name]
+            if (self.assignments[msg.id] != helper_method_name
+                    and not self._is_raised_later(node, msg.id)):
+                self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
+            elif (self.assignments[msg.id] == helper_method_name
+                    and self._is_raised_later(node, msg.id)):
+                self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC)
+
+    def _is_raised_later(self, node, name):
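+        """Check whether 'name' is raised in a later sibling statement.
+
+        A message that is subsequently raised (rather than only logged)
+        legitimately needs the ``_`` translation helper, so the callers
+        above use this to decide between K005/K006 and K007.
+        """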
+
+        def find_peers(node):
+            node_for_line = node._parent
+            for _field, value in ast.iter_fields(node._parent._parent):
+                if isinstance(value, list) and node_for_line in value:
+                    return value[value.index(node_for_line) + 1:]
+                continue
+            return []
+
+        peers = find_peers(node)
+        for peer in peers:
+            if isinstance(peer, ast.Raise):
+                if (isinstance(peer.type, ast.Call) and
+                        len(peer.type.args) > 0 and
+                        isinstance(peer.type.args[0], ast.Name) and
+                        name in (a.id for a in peer.type.args)):
+                    return True
+                else:
+                    return False
+            elif isinstance(peer, ast.Assign):
+                if name in (t.id for t in peer.targets):
+                    return False
+
+
+def check_oslo_namespace_imports(logical_line, blank_before, filename):
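+    """Check that the oslo namespace package is not imported.
+
+    Okay: from oslo_config import cfg
+    Okay: import oslo_messaging
+    K333: from oslo.config import cfg
+    K333: import oslo.messaging
+
+    """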
+    oslo_namespace_imports = re.compile(
+        r"(((from)|(import))\s+oslo\.)|(from\s+oslo\s+import\s+)")
+
+    if re.match(oslo_namespace_imports, logical_line):
+        msg = ("K333: '%s' must be used instead of '%s'.") % (
+            logical_line.replace('oslo.', 'oslo_'),
+            logical_line)
+        yield (0, msg)
+
+
+def dict_constructor_with_sequence_copy(logical_line):
+    """Should use a dict comprehension instead of a dict constructor.
+
+    PEP-0274 introduced dict comprehension with performance enhancement
+    and it also makes code more readable.
+
+    Okay: lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
+    Okay: fool = dict(a='a', b='b')
+    K008: lower_res = dict((k.lower(), v) for k, v in six.iteritems(res[1]))
+    K008:     attrs = dict([(k, _from_json(v))
+    K008: dict([[i,i] for i in range(3)])
+
+    """
+    MESSAGE = ("K008 Must use a dict comprehension instead of a dict"
+               " constructor with a sequence of key-value pairs.")
+
+    dict_constructor_with_sequence_re = (
+        re.compile(r".*\bdict\((\[)?(\(|\[)(?!\{)"))
+
+    if dict_constructor_with_sequence_re.match(logical_line):
+        yield (0, MESSAGE)
+
+
+def factory(register):
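+    """Register all of the Keystone-specific checks with the framework."""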
+    register(CheckForMutableDefaultArgs)
+    register(block_comments_begin_with_a_space)
+    register(CheckForAssertingNoneEquality)
+    register(CheckForLoggingIssues)
+    register(check_oslo_namespace_imports)
+    register(dict_constructor_with_sequence_copy)
diff --git a/keystone-moon/keystone/i18n.py b/keystone-moon/keystone/i18n.py
new file mode 100644 (file)
index 0000000..2eb42d3
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oslo.i18n integration module.
+
+See http://docs.openstack.org/developer/oslo.i18n/usage.html .
+
+"""
+
+import oslo_i18n
+
+
+_translators = oslo_i18n.TranslatorFactory(domain='keystone')
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
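+
+# Illustrative usage sketch (mirroring how these helpers are used
+# elsewhere in this patch):
+#
+#     from keystone.i18n import _, _LW
+#
+#     raise exception.ValidationError(message=_('Name field is required'))
+#     LOG.warning(_LW('Unable to add user %(user)s to %(tenant)s.'),
+#                 {'user': user_id, 'tenant': tenant_id})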
diff --git a/keystone-moon/keystone/identity/__init__.py b/keystone-moon/keystone/identity/__init__.py
new file mode 100644 (file)
index 0000000..3063b5c
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.identity import controllers  # noqa
+from keystone.identity.core import *  # noqa
+from keystone.identity import generator  # noqa
+from keystone.identity import routers  # noqa
diff --git a/keystone-moon/keystone/identity/backends/__init__.py b/keystone-moon/keystone/identity/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/identity/backends/ldap.py b/keystone-moon/keystone/identity/backends/ldap.py
new file mode 100644 (file)
index 0000000..0f7ee45
--- /dev/null
@@ -0,0 +1,402 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from __future__ import absolute_import
+import uuid
+
+import ldap
+import ldap.filter
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone import clean
+from keystone.common import driver_hints
+from keystone.common import ldap as common_ldap
+from keystone.common import models
+from keystone import exception
+from keystone.i18n import _
+from keystone import identity
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class Identity(identity.Driver):
+    def __init__(self, conf=None):
+        super(Identity, self).__init__()
+        if conf is None:
+            conf = CONF
+        self.user = UserApi(conf)
+        self.group = GroupApi(conf)
+
+    def default_assignment_driver(self):
+        return "keystone.assignment.backends.ldap.Assignment"
+
+    def is_domain_aware(self):
+        return False
+
+    def generates_uuids(self):
+        return False
+
+    # Identity interface
+
+    def authenticate(self, user_id, password):
+        try:
+            user_ref = self._get_user(user_id)
+        except exception.UserNotFound:
+            raise AssertionError(_('Invalid user / password'))
+        if not user_id or not password:
+            raise AssertionError(_('Invalid user / password'))
+        conn = None
+        try:
+            conn = self.user.get_connection(user_ref['dn'],
+                                            password, end_user_auth=True)
+            if not conn:
+                raise AssertionError(_('Invalid user / password'))
+        except Exception:
+            raise AssertionError(_('Invalid user / password'))
+        finally:
+            if conn:
+                conn.unbind_s()
+        return self.user.filter_attributes(user_ref)
+
+    def _get_user(self, user_id):
+        return self.user.get(user_id)
+
+    def get_user(self, user_id):
+        return self.user.get_filtered(user_id)
+
+    def list_users(self, hints):
+        return self.user.get_all_filtered(hints)
+
+    def get_user_by_name(self, user_name, domain_id):
+        # domain_id will already have been handled in the Manager layer,
+        # parameter left in so this matches the Driver specification
+        return self.user.filter_attributes(self.user.get_by_name(user_name))
+
+    # CRUD
+    def create_user(self, user_id, user):
+        self.user.check_allow_create()
+        user_ref = self.user.create(user)
+        return self.user.filter_attributes(user_ref)
+
+    def update_user(self, user_id, user):
+        self.user.check_allow_update()
+        old_obj = self.user.get(user_id)
+        if 'name' in user and old_obj.get('name') != user['name']:
+            raise exception.Conflict(_('Cannot change user name'))
+
+        if self.user.enabled_mask:
+            self.user.mask_enabled_attribute(user)
+        elif self.user.enabled_invert and not self.user.enabled_emulation:
+            # We need to invert the enabled value for the old model object
+            # to prevent the LDAP update code from thinking that the enabled
+            # values are already equal.
+            user['enabled'] = not user['enabled']
+            old_obj['enabled'] = not old_obj['enabled']
+
+        self.user.update(user_id, user, old_obj)
+        return self.user.get_filtered(user_id)
+
+    def delete_user(self, user_id):
+        self.user.check_allow_delete()
+        user = self.user.get(user_id)
+        user_dn = user['dn']
+        groups = self.group.list_user_groups(user_dn)
+        for group in groups:
+            self.group.remove_user(user_dn, group['id'], user_id)
+
+        if hasattr(user, 'tenant_id'):
+            self.project.remove_user(user.tenant_id, user_dn)
+        self.user.delete(user_id)
+
+    def create_group(self, group_id, group):
+        self.group.check_allow_create()
+        group['name'] = clean.group_name(group['name'])
+        return common_ldap.filter_entity(self.group.create(group))
+
+    def get_group(self, group_id):
+        return self.group.get_filtered(group_id)
+
+    def get_group_by_name(self, group_name, domain_id):
+        # domain_id will already have been handled in the Manager layer,
+        # parameter left in so this matches the Driver specification
+        return self.group.get_filtered_by_name(group_name)
+
+    def update_group(self, group_id, group):
+        self.group.check_allow_update()
+        if 'name' in group:
+            group['name'] = clean.group_name(group['name'])
+        return common_ldap.filter_entity(self.group.update(group_id, group))
+
+    def delete_group(self, group_id):
+        self.group.check_allow_delete()
+        return self.group.delete(group_id)
+
+    def add_user_to_group(self, user_id, group_id):
+        user_ref = self._get_user(user_id)
+        user_dn = user_ref['dn']
+        self.group.add_user(user_dn, group_id, user_id)
+
+    def remove_user_from_group(self, user_id, group_id):
+        user_ref = self._get_user(user_id)
+        user_dn = user_ref['dn']
+        self.group.remove_user(user_dn, group_id, user_id)
+
+    def list_groups_for_user(self, user_id, hints):
+        user_ref = self._get_user(user_id)
+        user_dn = user_ref['dn']
+        return self.group.list_user_groups_filtered(user_dn, hints)
+
+    def list_groups(self, hints):
+        return self.group.get_all_filtered(hints)
+
+    def list_users_in_group(self, group_id, hints):
+        users = []
+        for user_dn in self.group.list_group_users(group_id):
+            user_id = self.user._dn_to_id(user_dn)
+            try:
+                users.append(self.user.get_filtered(user_id))
+            except exception.UserNotFound:
+                LOG.debug(("Group member '%(user_dn)s' not found in"
+                           " '%(group_id)s'. The user should be removed"
+                           " from the group. The user will be ignored."),
+                          dict(user_dn=user_dn, group_id=group_id))
+        return users
+
+    def check_user_in_group(self, user_id, group_id):
+        user_refs = self.list_users_in_group(group_id, driver_hints.Hints())
+        for x in user_refs:
+            if x['id'] == user_id:
+                break
+        else:
+            # Try to fetch the user to see if it even exists.  This
+            # will raise a more accurate exception.
+            self.get_user(user_id)
+            raise exception.NotFound(_("User '%(user_id)s' not found in"
+                                       " group '%(group_id)s'") %
+                                     {'user_id': user_id,
+                                      'group_id': group_id})
+
+
+# TODO(termie): turn this into a data object and move logic to driver
+class UserApi(common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap):
+    DEFAULT_OU = 'ou=Users'
+    DEFAULT_STRUCTURAL_CLASSES = ['person']
+    DEFAULT_ID_ATTR = 'cn'
+    DEFAULT_OBJECTCLASS = 'inetOrgPerson'
+    NotFound = exception.UserNotFound
+    options_name = 'user'
+    attribute_options_names = {'password': 'pass',
+                               'email': 'mail',
+                               'name': 'name',
+                               'enabled': 'enabled',
+                               'default_project_id': 'default_project_id'}
+    immutable_attrs = ['id']
+
+    model = models.User
+
+    def __init__(self, conf):
+        super(UserApi, self).__init__(conf)
+        self.enabled_mask = conf.ldap.user_enabled_mask
+        self.enabled_default = conf.ldap.user_enabled_default
+        self.enabled_invert = conf.ldap.user_enabled_invert
+        self.enabled_emulation = conf.ldap.user_enabled_emulation
+
+    def _ldap_res_to_model(self, res):
+        obj = super(UserApi, self)._ldap_res_to_model(res)
+        if self.enabled_mask != 0:
+            enabled = int(obj.get('enabled', self.enabled_default))
+            obj['enabled'] = ((enabled & self.enabled_mask) !=
+                              self.enabled_mask)
+        elif self.enabled_invert and not self.enabled_emulation:
+            # This could be a bool or a string.  If it's a string,
+            # we need to convert it so we can invert it properly.
+            enabled = obj.get('enabled', self.enabled_default)
+            if isinstance(enabled, six.string_types):
+                if enabled.lower() == 'true':
+                    enabled = True
+                else:
+                    enabled = False
+            obj['enabled'] = not enabled
+        obj['dn'] = res[0]
+
+        return obj
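+
+    # Worked example for the enabled-mask handling above (hypothetical
+    # Active-Directory-style settings: user_enabled_mask = 2,
+    # user_enabled_default = 512): a stored value of 512 gives
+    # (512 & 2) != 2 -> True (enabled), while 514 gives (514 & 2) == 2,
+    # i.e. the disable bit is set, so the user is reported as disabled.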
+
+    def mask_enabled_attribute(self, values):
+        value = values['enabled']
+        values.setdefault('enabled_nomask', int(self.enabled_default))
+        if value != ((values['enabled_nomask'] & self.enabled_mask) !=
+                     self.enabled_mask):
+            values['enabled_nomask'] ^= self.enabled_mask
+        values['enabled'] = values['enabled_nomask']
+        del values['enabled_nomask']
+
+    def create(self, values):
+        if self.enabled_mask:
+            orig_enabled = values['enabled']
+            self.mask_enabled_attribute(values)
+        elif self.enabled_invert and not self.enabled_emulation:
+            orig_enabled = values['enabled']
+            if orig_enabled is not None:
+                values['enabled'] = not orig_enabled
+            else:
+                values['enabled'] = self.enabled_default
+        values = super(UserApi, self).create(values)
+        if self.enabled_mask or (self.enabled_invert and
+                                 not self.enabled_emulation):
+            values['enabled'] = orig_enabled
+        return values
+
+    def get_filtered(self, user_id):
+        user = self.get(user_id)
+        return self.filter_attributes(user)
+
+    def get_all_filtered(self, hints):
+        query = self.filter_query(hints)
+        return [self.filter_attributes(user) for user in self.get_all(query)]
+
+    def filter_attributes(self, user):
+        return identity.filter_user(common_ldap.filter_entity(user))
+
+    def is_user(self, dn):
+        """Returns True if the entry is a user."""
+
+        # NOTE(blk-u): It's easy to check if the DN is under the User tree,
+        # but may not be accurate. A more accurate test would be to fetch the
+        # entry to see if it's got the user objectclass, but this could be
+        # really expensive considering how this is used.
+
+        return common_ldap.dn_startswith(dn, self.tree_dn)
+
+
+class GroupApi(common_ldap.BaseLdap):
+    DEFAULT_OU = 'ou=UserGroups'
+    DEFAULT_STRUCTURAL_CLASSES = []
+    DEFAULT_OBJECTCLASS = 'groupOfNames'
+    DEFAULT_ID_ATTR = 'cn'
+    DEFAULT_MEMBER_ATTRIBUTE = 'member'
+    NotFound = exception.GroupNotFound
+    options_name = 'group'
+    attribute_options_names = {'description': 'desc',
+                               'name': 'name'}
+    immutable_attrs = ['name']
+    model = models.Group
+
+    def _ldap_res_to_model(self, res):
+        model = super(GroupApi, self)._ldap_res_to_model(res)
+        model['dn'] = res[0]
+        return model
+
+    def __init__(self, conf):
+        super(GroupApi, self).__init__(conf)
+        self.member_attribute = (conf.ldap.group_member_attribute
+                                 or self.DEFAULT_MEMBER_ATTRIBUTE)
+
+    def create(self, values):
+        data = values.copy()
+        if data.get('id') is None:
+            data['id'] = uuid.uuid4().hex
+        if 'description' in data and data['description'] in ['', None]:
+            data.pop('description')
+        return super(GroupApi, self).create(data)
+
+    def delete(self, group_id):
+        if self.subtree_delete_enabled:
+            super(GroupApi, self).deleteTree(group_id)
+        else:
+            # TODO(spzala): this is only a placeholder for group and domain
+            # role support which will be added under bug 1101287
+
+            group_ref = self.get(group_id)
+            group_dn = group_ref['dn']
+            if group_dn:
+                self._delete_tree_nodes(group_dn, ldap.SCOPE_ONELEVEL)
+            super(GroupApi, self).delete(group_id)
+
+    def update(self, group_id, values):
+        old_obj = self.get(group_id)
+        return super(GroupApi, self).update(group_id, values, old_obj)
+
+    def add_user(self, user_dn, group_id, user_id):
+        group_ref = self.get(group_id)
+        group_dn = group_ref['dn']
+        try:
+            super(GroupApi, self).add_member(user_dn, group_dn)
+        except exception.Conflict:
+            raise exception.Conflict(_(
+                'User %(user_id)s is already a member of group %(group_id)s') %
+                {'user_id': user_id, 'group_id': group_id})
+
+    def remove_user(self, user_dn, group_id, user_id):
+        group_ref = self.get(group_id)
+        group_dn = group_ref['dn']
+        try:
+            super(GroupApi, self).remove_member(user_dn, group_dn)
+        except ldap.NO_SUCH_ATTRIBUTE:
+            raise exception.UserNotFound(user_id=user_id)
+
+    def list_user_groups(self, user_dn):
+        """Return a list of groups for which the user is a member."""
+
+        user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
+        query = '(&(objectClass=%s)(%s=%s)%s)' % (self.object_class,
+                                                  self.member_attribute,
+                                                  user_dn_esc,
+                                                  self.ldap_filter or '')
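+        # With the default settings above this expands to something like
+        # (illustrative):
+        # (&(objectClass=groupOfNames)(member=cn=jdoe,ou=Users,dc=example,dc=com))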
+        return self.get_all(query)
+
+    def list_user_groups_filtered(self, user_dn, hints):
+        """Return a filtered list of groups for which the user is a member."""
+
+        user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
+        query = '(&(objectClass=%s)(%s=%s)%s)' % (self.object_class,
+                                                  self.member_attribute,
+                                                  user_dn_esc,
+                                                  self.ldap_filter or '')
+        return self.get_all_filtered(hints, query)
+
+    def list_group_users(self, group_id):
+        """Return a list of user dns which are members of a group."""
+        group_ref = self.get(group_id)
+        group_dn = group_ref['dn']
+
+        try:
+            attrs = self._ldap_get_list(group_dn, ldap.SCOPE_BASE,
+                                        attrlist=[self.member_attribute])
+        except ldap.NO_SUCH_OBJECT:
+            raise self.NotFound(group_id=group_id)
+
+        users = []
+        for dn, member in attrs:
+            user_dns = member.get(self.member_attribute, [])
+            for user_dn in user_dns:
+                if self._is_dumb_member(user_dn):
+                    continue
+                users.append(user_dn)
+        return users
+
+    def get_filtered(self, group_id):
+        group = self.get(group_id)
+        return common_ldap.filter_entity(group)
+
+    def get_filtered_by_name(self, group_name):
+        group = self.get_by_name(group_name)
+        return common_ldap.filter_entity(group)
+
+    def get_all_filtered(self, hints, query=None):
+        query = self.filter_query(hints, query)
+        return [common_ldap.filter_entity(group)
+                for group in self.get_all(query)]
diff --git a/keystone-moon/keystone/identity/backends/sql.py b/keystone-moon/keystone/identity/backends/sql.py
new file mode 100644 (file)
index 0000000..3986841
--- /dev/null
@@ -0,0 +1,314 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from keystone.common import sql
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone import identity
+
+
+CONF = cfg.CONF
+
+
+class User(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'user'
+    attributes = ['id', 'name', 'domain_id', 'password', 'enabled',
+                  'default_project_id']
+    id = sql.Column(sql.String(64), primary_key=True)
+    name = sql.Column(sql.String(255), nullable=False)
+    domain_id = sql.Column(sql.String(64), nullable=False)
+    password = sql.Column(sql.String(128))
+    enabled = sql.Column(sql.Boolean)
+    extra = sql.Column(sql.JsonBlob())
+    default_project_id = sql.Column(sql.String(64))
+    # Unique constraint across two columns to create the separation
+    # rather than just only 'name' being unique
+    __table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {})
+
+    def to_dict(self, include_extra_dict=False):
+        d = super(User, self).to_dict(include_extra_dict=include_extra_dict)
+        if 'default_project_id' in d and d['default_project_id'] is None:
+            del d['default_project_id']
+        return d
+
+
+class Group(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'group'
+    attributes = ['id', 'name', 'domain_id', 'description']
+    id = sql.Column(sql.String(64), primary_key=True)
+    name = sql.Column(sql.String(64), nullable=False)
+    domain_id = sql.Column(sql.String(64), nullable=False)
+    description = sql.Column(sql.Text())
+    extra = sql.Column(sql.JsonBlob())
+    # Unique constraint across two columns to create the separation
+    # rather than just only 'name' being unique
+    __table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {})
+
+
+class UserGroupMembership(sql.ModelBase, sql.DictBase):
+    """Group membership join table."""
+    __tablename__ = 'user_group_membership'
+    user_id = sql.Column(sql.String(64),
+                         sql.ForeignKey('user.id'),
+                         primary_key=True)
+    group_id = sql.Column(sql.String(64),
+                          sql.ForeignKey('group.id'),
+                          primary_key=True)
+
+
+class Identity(identity.Driver):
+    # NOTE(henry-nash): Override the __init__() method so as to take a
+    # config parameter to enable sql to be used as a domain-specific driver.
+    def __init__(self, conf=None):
+        super(Identity, self).__init__()
+
+    def default_assignment_driver(self):
+        return "keystone.assignment.backends.sql.Assignment"
+
+    @property
+    def is_sql(self):
+        return True
+
+    def _check_password(self, password, user_ref):
+        """Check the specified password against the data store.
+
+        Note that we'll pass in the entire user_ref in case the subclass
+        needs things like user_ref.get('name')
+        For further justification, please see the follow up suggestion at
+        https://blueprints.launchpad.net/keystone/+spec/sql-identiy-pam
+
+        """
+        return utils.check_password(password, user_ref.password)
+
+    # Identity interface
+    def authenticate(self, user_id, password):
+        session = sql.get_session()
+        user_ref = None
+        try:
+            user_ref = self._get_user(session, user_id)
+        except exception.UserNotFound:
+            raise AssertionError(_('Invalid user / password'))
+        if not self._check_password(password, user_ref):
+            raise AssertionError(_('Invalid user / password'))
+        return identity.filter_user(user_ref.to_dict())
+
+    # user crud
+
+    @sql.handle_conflicts(conflict_type='user')
+    def create_user(self, user_id, user):
+        user = utils.hash_user_password(user)
+        session = sql.get_session()
+        with session.begin():
+            user_ref = User.from_dict(user)
+            session.add(user_ref)
+        return identity.filter_user(user_ref.to_dict())
+
+    @sql.truncated
+    def list_users(self, hints):
+        session = sql.get_session()
+        query = session.query(User)
+        user_refs = sql.filter_limit_query(User, query, hints)
+        return [identity.filter_user(x.to_dict()) for x in user_refs]
+
+    def _get_user(self, session, user_id):
+        user_ref = session.query(User).get(user_id)
+        if not user_ref:
+            raise exception.UserNotFound(user_id=user_id)
+        return user_ref
+
+    def get_user(self, user_id):
+        session = sql.get_session()
+        return identity.filter_user(self._get_user(session, user_id).to_dict())
+
+    def get_user_by_name(self, user_name, domain_id):
+        session = sql.get_session()
+        query = session.query(User)
+        query = query.filter_by(name=user_name)
+        query = query.filter_by(domain_id=domain_id)
+        try:
+            user_ref = query.one()
+        except sql.NotFound:
+            raise exception.UserNotFound(user_id=user_name)
+        return identity.filter_user(user_ref.to_dict())
+
+    @sql.handle_conflicts(conflict_type='user')
+    def update_user(self, user_id, user):
+        session = sql.get_session()
+
+        with session.begin():
+            user_ref = self._get_user(session, user_id)
+            old_user_dict = user_ref.to_dict()
+            user = utils.hash_user_password(user)
+            for k in user:
+                old_user_dict[k] = user[k]
+            new_user = User.from_dict(old_user_dict)
+            for attr in User.attributes:
+                if attr != 'id':
+                    setattr(user_ref, attr, getattr(new_user, attr))
+            user_ref.extra = new_user.extra
+        return identity.filter_user(user_ref.to_dict(include_extra_dict=True))
+
+    def add_user_to_group(self, user_id, group_id):
+        session = sql.get_session()
+        self.get_group(group_id)
+        self.get_user(user_id)
+        query = session.query(UserGroupMembership)
+        query = query.filter_by(user_id=user_id)
+        query = query.filter_by(group_id=group_id)
+        rv = query.first()
+        if rv:
+            return
+
+        with session.begin():
+            session.add(UserGroupMembership(user_id=user_id,
+                                            group_id=group_id))
+
+    def check_user_in_group(self, user_id, group_id):
+        session = sql.get_session()
+        self.get_group(group_id)
+        self.get_user(user_id)
+        query = session.query(UserGroupMembership)
+        query = query.filter_by(user_id=user_id)
+        query = query.filter_by(group_id=group_id)
+        if not query.first():
+            raise exception.NotFound(_("User '%(user_id)s' not found in"
+                                       " group '%(group_id)s'") %
+                                     {'user_id': user_id,
+                                      'group_id': group_id})
+
+    def remove_user_from_group(self, user_id, group_id):
+        session = sql.get_session()
+        # We don't check if user or group are still valid and let the remove
+        # be tried anyway - in case this is some kind of clean-up operation
+        query = session.query(UserGroupMembership)
+        query = query.filter_by(user_id=user_id)
+        query = query.filter_by(group_id=group_id)
+        membership_ref = query.first()
+        if membership_ref is None:
+            # Check if the group and user exist to return descriptive
+            # exceptions.
+            self.get_group(group_id)
+            self.get_user(user_id)
+            raise exception.NotFound(_("User '%(user_id)s' not found in"
+                                       " group '%(group_id)s'") %
+                                     {'user_id': user_id,
+                                      'group_id': group_id})
+        with session.begin():
+            session.delete(membership_ref)
+
+    def list_groups_for_user(self, user_id, hints):
+        # TODO(henry-nash) We could implement full filtering here by enhancing
+        # the join below.  However, since it is likely to be a fairly rare
+        # occurrence to filter on more than the user_id already being used
+        # here, this is left as future enhancement and until then we leave
+        # it for the controller to do for us.
+        session = sql.get_session()
+        self.get_user(user_id)
+        query = session.query(Group).join(UserGroupMembership)
+        query = query.filter(UserGroupMembership.user_id == user_id)
+        return [g.to_dict() for g in query]
+
+    def list_users_in_group(self, group_id, hints):
+        # TODO(henry-nash) We could implement full filtering here by enhancing
+        # the join below.  However, since it is likely to be a fairly rare
+        # occurrence to filter on more than the group_id already being used
+        # here, this is left as future enhancement and until then we leave
+        # it for the controller to do for us.
+        session = sql.get_session()
+        self.get_group(group_id)
+        query = session.query(User).join(UserGroupMembership)
+        query = query.filter(UserGroupMembership.group_id == group_id)
+
+        return [identity.filter_user(u.to_dict()) for u in query]
+
+    def delete_user(self, user_id):
+        session = sql.get_session()
+
+        with session.begin():
+            ref = self._get_user(session, user_id)
+
+            q = session.query(UserGroupMembership)
+            q = q.filter_by(user_id=user_id)
+            q.delete(False)
+
+            session.delete(ref)
+
+    # group crud
+
+    @sql.handle_conflicts(conflict_type='group')
+    def create_group(self, group_id, group):
+        session = sql.get_session()
+        with session.begin():
+            ref = Group.from_dict(group)
+            session.add(ref)
+        return ref.to_dict()
+
+    @sql.truncated
+    def list_groups(self, hints):
+        session = sql.get_session()
+        query = session.query(Group)
+        refs = sql.filter_limit_query(Group, query, hints)
+        return [ref.to_dict() for ref in refs]
+
+    def _get_group(self, session, group_id):
+        ref = session.query(Group).get(group_id)
+        if not ref:
+            raise exception.GroupNotFound(group_id=group_id)
+        return ref
+
+    def get_group(self, group_id):
+        session = sql.get_session()
+        return self._get_group(session, group_id).to_dict()
+
+    def get_group_by_name(self, group_name, domain_id):
+        session = sql.get_session()
+        query = session.query(Group)
+        query = query.filter_by(name=group_name)
+        query = query.filter_by(domain_id=domain_id)
+        try:
+            group_ref = query.one()
+        except sql.NotFound:
+            raise exception.GroupNotFound(group_id=group_name)
+        return group_ref.to_dict()
+
+    @sql.handle_conflicts(conflict_type='group')
+    def update_group(self, group_id, group):
+        session = sql.get_session()
+
+        with session.begin():
+            ref = self._get_group(session, group_id)
+            old_dict = ref.to_dict()
+            for k in group:
+                old_dict[k] = group[k]
+            new_group = Group.from_dict(old_dict)
+            for attr in Group.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_group, attr))
+            ref.extra = new_group.extra
+        return ref.to_dict()
+
+    def delete_group(self, group_id):
+        session = sql.get_session()
+
+        with session.begin():
+            ref = self._get_group(session, group_id)
+
+            q = session.query(UserGroupMembership)
+            q = q.filter_by(group_id=group_id)
+            q.delete(False)
+
+            session.delete(ref)
diff --git a/keystone-moon/keystone/identity/controllers.py b/keystone-moon/keystone/identity/controllers.py
new file mode 100644 (file)
index 0000000..a2676c4
--- /dev/null
@@ -0,0 +1,335 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Workflow Logic the Identity service."""
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone import notifications
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('assignment_api', 'identity_api', 'resource_api')
+class User(controller.V2Controller):
+
+    @controller.v2_deprecated
+    def get_user(self, context, user_id):
+        self.assert_admin(context)
+        ref = self.identity_api.get_user(user_id)
+        return {'user': self.v3_to_v2_user(ref)}
+
+    @controller.v2_deprecated
+    def get_users(self, context):
+        # NOTE(termie): I can't imagine that this really wants all the data
+        #               about every single user in the system...
+        if 'name' in context['query_string']:
+            return self.get_user_by_name(
+                context, context['query_string'].get('name'))
+
+        self.assert_admin(context)
+        user_list = self.identity_api.list_users(
+            CONF.identity.default_domain_id)
+        return {'users': self.v3_to_v2_user(user_list)}
+
+    @controller.v2_deprecated
+    def get_user_by_name(self, context, user_name):
+        self.assert_admin(context)
+        ref = self.identity_api.get_user_by_name(
+            user_name, CONF.identity.default_domain_id)
+        return {'user': self.v3_to_v2_user(ref)}
+
+    # CRUD extension
+    @controller.v2_deprecated
+    def create_user(self, context, user):
+        user = self._normalize_OSKSADM_password_on_request(user)
+        user = self.normalize_username_in_request(user)
+        user = self._normalize_dict(user)
+        self.assert_admin(context)
+
+        if 'name' not in user or not user['name']:
+            msg = _('Name field is required and cannot be empty')
+            raise exception.ValidationError(message=msg)
+        if 'enabled' in user and not isinstance(user['enabled'], bool):
+            msg = _('Enabled field must be a boolean')
+            raise exception.ValidationError(message=msg)
+
+        default_project_id = user.pop('tenantId', None)
+        if default_project_id is not None:
+            # Check to see if the project is valid before moving on.
+            self.resource_api.get_project(default_project_id)
+            user['default_project_id'] = default_project_id
+
+        # The manager layer will generate the unique ID for users
+        user_ref = self._normalize_domain_id(context, user.copy())
+        new_user_ref = self.v3_to_v2_user(
+            self.identity_api.create_user(user_ref))
+
+        if default_project_id is not None:
+            self.assignment_api.add_user_to_project(default_project_id,
+                                                    new_user_ref['id'])
+        return {'user': new_user_ref}
+
+    @controller.v2_deprecated
+    def update_user(self, context, user_id, user):
+        # NOTE(termie): this is really more of a patch than a put
+        user = self.normalize_username_in_request(user)
+        self.assert_admin(context)
+
+        if 'enabled' in user and not isinstance(user['enabled'], bool):
+            msg = _('Enabled field should be a boolean')
+            raise exception.ValidationError(message=msg)
+
+        default_project_id = user.pop('tenantId', None)
+        if default_project_id is not None:
+            user['default_project_id'] = default_project_id
+
+        old_user_ref = self.v3_to_v2_user(
+            self.identity_api.get_user(user_id))
+
+        # Check whether a tenant is being added or changed for the user.
+        # Catch the case where the tenant is being changed for a user and also
+        # where a user previously had no tenant but a tenant is now being
+        # added for the user.
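+        # Illustrative (hypothetical IDs): a PUT body of
+        #   {'user': {'tenantId': 't2'}}
+        # for a user currently in tenant 't1' passes the project-existence
+        # check here, then the member role is removed on 't1' and the user
+        # is added to 't2' further below.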
+        if (('tenantId' in old_user_ref and
+                old_user_ref['tenantId'] != default_project_id and
+                default_project_id is not None) or
+            ('tenantId' not in old_user_ref and
+                default_project_id is not None)):
+            # Make sure the new project actually exists before we perform the
+            # user update.
+            self.resource_api.get_project(default_project_id)
+
+        user_ref = self.v3_to_v2_user(
+            self.identity_api.update_user(user_id, user))
+
+        # If 'tenantId' is in either ref, we might need to add or remove the
+        # user from a project.
+        if 'tenantId' in user_ref or 'tenantId' in old_user_ref:
+            if user_ref['tenantId'] != old_user_ref.get('tenantId'):
+                if old_user_ref.get('tenantId'):
+                    try:
+                        member_role_id = CONF.member_role_id
+                        self.assignment_api.remove_role_from_user_and_project(
+                            user_id, old_user_ref['tenantId'], member_role_id)
+                    except exception.NotFound:
+                        # NOTE(morganfainberg): This is not a critical error;
+                        # it
+                        # just means that the user cannot be removed from the
+                        # old tenant.  This could occur if roles aren't found
+                        # or if the project is invalid or if there are no roles
+                        # for the user on that project.
+                        msg = _LW('Unable to remove user %(user)s from '
+                                  '%(tenant)s.')
+                        LOG.warning(msg, {'user': user_id,
+                                          'tenant': old_user_ref['tenantId']})
+
+                if user_ref['tenantId']:
+                    try:
+                        self.assignment_api.add_user_to_project(
+                            user_ref['tenantId'], user_id)
+                    except exception.Conflict:
+                        # We are already a member of that tenant
+                        pass
+                    except exception.NotFound:
+                        # NOTE(morganfainberg): Log this and move on. This is
+                        # not the end of the world if we can't add the user to
+                        # the appropriate tenant. Most of the time this means
+                        # that the project is invalid or roles are somehow
+                        # incorrect.  This shouldn't prevent the return of the
+                        # new ref.
+                        msg = _LW('Unable to add user %(user)s to %(tenant)s.')
+                        LOG.warning(msg, {'user': user_id,
+                                          'tenant': user_ref['tenantId']})
+
+        return {'user': user_ref}
+
+    @controller.v2_deprecated
+    def delete_user(self, context, user_id):
+        self.assert_admin(context)
+        self.identity_api.delete_user(user_id)
+
+    @controller.v2_deprecated
+    def set_user_enabled(self, context, user_id, user):
+        return self.update_user(context, user_id, user)
+
+    @controller.v2_deprecated
+    def set_user_password(self, context, user_id, user):
+        user = self._normalize_OSKSADM_password_on_request(user)
+        return self.update_user(context, user_id, user)
+
+    @staticmethod
+    def _normalize_OSKSADM_password_on_request(ref):
+        """Sets the password from the OS-KSADM Admin Extension.
+
+        The OS-KSADM Admin Extension documentation says that
+        `OS-KSADM:password` can be used in place of `password`.
+
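+        For example (hypothetical payload), a request body of
+        {'name': 'alice', 'OS-KSADM:password': 's3cret'} is normalized to
+        {'name': 'alice', 'password': 's3cret'}.
+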
+        """
+        if 'OS-KSADM:password' in ref:
+            ref['password'] = ref.pop('OS-KSADM:password')
+        return ref
+
+
+@dependency.requires('identity_api')
+class UserV3(controller.V3Controller):
+    collection_name = 'users'
+    member_name = 'user'
+
+    def __init__(self):
+        super(UserV3, self).__init__()
+        self.get_member_from_driver = self.identity_api.get_user
+
+    def _check_user_and_group_protection(self, context, prep_info,
+                                         user_id, group_id):
+        ref = {}
+        ref['user'] = self.identity_api.get_user(user_id)
+        ref['group'] = self.identity_api.get_group(group_id)
+        self.check_protection(context, prep_info, ref)
+
+    @controller.protected()
+    def create_user(self, context, user):
+        self._require_attribute(user, 'name')
+
+        # The manager layer will generate the unique ID for users
+        ref = self._normalize_dict(user)
+        ref = self._normalize_domain_id(context, ref)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.identity_api.create_user(ref, initiator)
+        return UserV3.wrap_member(context, ref)
+
+    @controller.filterprotected('domain_id', 'enabled', 'name')
+    def list_users(self, context, filters):
+        hints = UserV3.build_driver_hints(context, filters)
+        refs = self.identity_api.list_users(
+            domain_scope=self._get_domain_id_for_list_request(context),
+            hints=hints)
+        return UserV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.filterprotected('domain_id', 'enabled', 'name')
+    def list_users_in_group(self, context, filters, group_id):
+        hints = UserV3.build_driver_hints(context, filters)
+        refs = self.identity_api.list_users_in_group(group_id, hints=hints)
+        return UserV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.protected()
+    def get_user(self, context, user_id):
+        ref = self.identity_api.get_user(user_id)
+        return UserV3.wrap_member(context, ref)
+
+    def _update_user(self, context, user_id, user):
+        self._require_matching_id(user_id, user)
+        self._require_matching_domain_id(
+            user_id, user, self.identity_api.get_user)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.identity_api.update_user(user_id, user, initiator)
+        return UserV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def update_user(self, context, user_id, user):
+        return self._update_user(context, user_id, user)
+
+    @controller.protected(callback=_check_user_and_group_protection)
+    def add_user_to_group(self, context, user_id, group_id):
+        self.identity_api.add_user_to_group(user_id, group_id)
+
+    @controller.protected(callback=_check_user_and_group_protection)
+    def check_user_in_group(self, context, user_id, group_id):
+        return self.identity_api.check_user_in_group(user_id, group_id)
+
+    @controller.protected(callback=_check_user_and_group_protection)
+    def remove_user_from_group(self, context, user_id, group_id):
+        self.identity_api.remove_user_from_group(user_id, group_id)
+
+    @controller.protected()
+    def delete_user(self, context, user_id):
+        initiator = notifications._get_request_audit_info(context)
+        return self.identity_api.delete_user(user_id, initiator)
+
+    @controller.protected()
+    def change_password(self, context, user_id, user):
+        original_password = user.get('original_password')
+        if original_password is None:
+            raise exception.ValidationError(target='user',
+                                            attribute='original_password')
+
+        password = user.get('password')
+        if password is None:
+            raise exception.ValidationError(target='user',
+                                            attribute='password')
+        try:
+            self.identity_api.change_password(
+                context, user_id, original_password, password)
+        except AssertionError:
+            raise exception.Unauthorized()
+
+
+@dependency.requires('identity_api')
+class GroupV3(controller.V3Controller):
+    collection_name = 'groups'
+    member_name = 'group'
+
+    def __init__(self):
+        super(GroupV3, self).__init__()
+        self.get_member_from_driver = self.identity_api.get_group
+
+    @controller.protected()
+    def create_group(self, context, group):
+        self._require_attribute(group, 'name')
+
+        # The manager layer will generate the unique ID for groups
+        ref = self._normalize_dict(group)
+        ref = self._normalize_domain_id(context, ref)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.identity_api.create_group(ref, initiator)
+        return GroupV3.wrap_member(context, ref)
+
+    @controller.filterprotected('domain_id', 'name')
+    def list_groups(self, context, filters):
+        hints = GroupV3.build_driver_hints(context, filters)
+        refs = self.identity_api.list_groups(
+            domain_scope=self._get_domain_id_for_list_request(context),
+            hints=hints)
+        return GroupV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.filterprotected('name')
+    def list_groups_for_user(self, context, filters, user_id):
+        hints = GroupV3.build_driver_hints(context, filters)
+        refs = self.identity_api.list_groups_for_user(user_id, hints=hints)
+        return GroupV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.protected()
+    def get_group(self, context, group_id):
+        ref = self.identity_api.get_group(group_id)
+        return GroupV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def update_group(self, context, group_id, group):
+        self._require_matching_id(group_id, group)
+        self._require_matching_domain_id(
+            group_id, group, self.identity_api.get_group)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.identity_api.update_group(group_id, group, initiator)
+        return GroupV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_group(self, context, group_id):
+        initiator = notifications._get_request_audit_info(context)
+        self.identity_api.delete_group(group_id, initiator)
diff --git a/keystone-moon/keystone/identity/core.py b/keystone-moon/keystone/identity/core.py
new file mode 100644 (file)
index 0000000..988df78
--- /dev/null
@@ -0,0 +1,1259 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Identity service."""
+
+import abc
+import functools
+import os
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import importutils
+import six
+
+from keystone import clean
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import driver_hints
+from keystone.common import manager
+from keystone import config
+from keystone import exception
+from keystone.i18n import _, _LW
+from keystone.identity.mapping_backends import mapping
+from keystone import notifications
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+MEMOIZE = cache.get_memoization_decorator(section='identity')
+
+DOMAIN_CONF_FHEAD = 'keystone.'
+DOMAIN_CONF_FTAIL = '.conf'
+
+
+def filter_user(user_ref):
+    """Filter out private items in a user dict.
+
+    'password', 'tenants', 'groups' and 'domains' are never returned.
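+
+    For example (hypothetical values):
+
+        filter_user({'id': 'u1', 'name': 'alice', 'password': 'secret',
+                     'extra': {'password': 'secret', 'email': 'a@x.com'}})
+
+    returns {'id': 'u1', 'name': 'alice', 'extra': {'email': 'a@x.com'}}.
+    Note that the copy made here is shallow, so keys popped from 'extra'
+    are also removed from the caller's dict.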
+
+    :returns: user_ref
+
+    """
+    if user_ref:
+        user_ref = user_ref.copy()
+        user_ref.pop('password', None)
+        user_ref.pop('tenants', None)
+        user_ref.pop('groups', None)
+        user_ref.pop('domains', None)
+        try:
+            user_ref['extra'].pop('password', None)
+            user_ref['extra'].pop('tenants', None)
+        except KeyError:
+            pass
+    return user_ref
+
+
+@dependency.requires('domain_config_api')
+class DomainConfigs(dict):
+    """Discover, store and provide access to domain specific configs.
+
+    The setup_domain_drivers() call will be made via the wrapper from
+    the first call to any driver function handled by this manager.
+
+    Domain specific configurations are only supported for the identity backend
+    and the individual configurations are either specified in the resource
+    database or in individual domain configuration files, depending on the
+    setting of the 'domain_configurations_from_database' config option.
+
+    The result will be that for each domain with a specific configuration,
+    this class will hold a reference to a ConfigOpts and driver object that
+    the identity manager and driver can use.
+
+    """
+    configured = False
+    driver = None
+    _any_sql = False
+
+    def _load_driver(self, domain_config):
+        return importutils.import_object(
+            domain_config['cfg'].identity.driver, domain_config['cfg'])
+
+    def _assert_no_more_than_one_sql_driver(self, domain_id, new_config,
+                                            config_file=None):
+        """Ensure there is more than one sql driver.
+
+        Check to see if the addition of the driver in this new config
+        would cause there to now be more than one sql driver.
+
+        If we are loading from configuration files, the config_file will hold
+        the name of the file we have just loaded.
+
+        """
+        if (new_config['driver'].is_sql and
+                (self.driver.is_sql or self._any_sql)):
+            # The addition of this driver would cause us to have more than
+            # one sql driver, so raise an exception.
+            if not config_file:
+                config_file = _('Database at /domains/%s/config') % domain_id
+            raise exception.MultipleSQLDriversInConfig(source=config_file)
+        self._any_sql = new_config['driver'].is_sql
+
+    def _load_config_from_file(self, resource_api, file_list, domain_name):
+
+        try:
+            domain_ref = resource_api.get_domain_by_name(domain_name)
+        except exception.DomainNotFound:
+            LOG.warning(
+                _LW('Invalid domain name (%s) found in config file name'),
+                domain_name)
+            return
+
+        # Create a new entry in the domain config dict, which contains
+        # a new instance of both the conf environment and driver using
+        # options defined in this set of config files.  Later, when we
+        # service calls via this Manager, we'll index via this domain
+        # config dict to make sure we call the right driver
+        domain_config = {}
+        domain_config['cfg'] = cfg.ConfigOpts()
+        config.configure(conf=domain_config['cfg'])
+        domain_config['cfg'](args=[], project='keystone',
+                             default_config_files=file_list)
+        domain_config['driver'] = self._load_driver(domain_config)
+        self._assert_no_more_than_one_sql_driver(domain_ref['id'],
+                                                 domain_config,
+                                                 config_file=file_list)
+        self[domain_ref['id']] = domain_config
+
+    def _setup_domain_drivers_from_files(self, standard_driver, resource_api):
+        """Read the domain specific configuration files and load the drivers.
+
+        Domain configuration files are stored in the domain config directory,
+        and must be named of the form:
+
+        keystone.<domain_name>.conf
+
+        For each file, call the load config method where the domain_name
+        will be turned into a domain_id and then:
+
+        - Create a new config structure, adding in the specific additional
+          options defined in this config file
+        - Initialise a new instance of the required driver with this new config
+
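+        For example (hypothetical name), 'keystone.acme.conf' is loaded for
+        the domain named 'acme', while a file with fewer than two dots in
+        its name (e.g. 'keystone.conf') is skipped.
+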
+        """
+        conf_dir = CONF.identity.domain_config_dir
+        if not os.path.exists(conf_dir):
+            LOG.warning(_LW('Unable to locate domain config directory: %s'),
+                        conf_dir)
+            return
+
+        for r, d, f in os.walk(conf_dir):
+            for fname in f:
+                if (fname.startswith(DOMAIN_CONF_FHEAD) and
+                        fname.endswith(DOMAIN_CONF_FTAIL)):
+                    if fname.count('.') >= 2:
+                        self._load_config_from_file(
+                            resource_api, [os.path.join(r, fname)],
+                            fname[len(DOMAIN_CONF_FHEAD):
+                                  -len(DOMAIN_CONF_FTAIL)])
+                    else:
+                        LOG.debug(('Ignoring file (%s) while scanning domain '
+                                   'config directory'),
+                                  fname)
+
+    def _load_config_from_database(self, domain_id, specific_config):
+        domain_config = {}
+        domain_config['cfg'] = cfg.ConfigOpts()
+        config.configure(conf=domain_config['cfg'])
+        domain_config['cfg'](args=[], project='keystone')
+
+        # Override any options that have been passed in as specified in the
+        # database.
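+        # e.g. (illustrative values) a specific_config of
+        #   {'identity': {'driver': 'some.custom.identity.Driver'},
+        #    'ldap': {'url': 'ldap://ldap.example.com'}}
+        # results in one set_override() call per option against this
+        # domain's private ConfigOpts instance.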
+        for group in specific_config:
+            for option in specific_config[group]:
+                domain_config['cfg'].set_override(
+                    option, specific_config[group][option], group)
+
+        domain_config['driver'] = self._load_driver(domain_config)
+        self._assert_no_more_than_one_sql_driver(domain_id, domain_config)
+        self[domain_id] = domain_config
+
+    def _setup_domain_drivers_from_database(self, standard_driver,
+                                            resource_api):
+        """Read domain specific configuration from database and load drivers.
+
+        Domain configurations are stored in the domain-config backend,
+        so we go through each domain to find those that have a specific config
+        defined, and for those that do we:
+
+        - Create a new config structure, overriding any specific options
+          defined in the domain config backend
+        - Initialise a new instance of the required driver with this new config
+
+        """
+        for domain in resource_api.list_domains():
+            domain_config_options = (
+                self.domain_config_api.
+                get_config_with_sensitive_info(domain['id']))
+            if domain_config_options:
+                self._load_config_from_database(domain['id'],
+                                                domain_config_options)
+
+    def setup_domain_drivers(self, standard_driver, resource_api):
+        # This is called by the api call wrapper
+        self.configured = True
+        self.driver = standard_driver
+
+        if CONF.identity.domain_configurations_from_database:
+            self._setup_domain_drivers_from_database(standard_driver,
+                                                     resource_api)
+        else:
+            self._setup_domain_drivers_from_files(standard_driver,
+                                                  resource_api)
+
+    def get_domain_driver(self, domain_id):
+        if domain_id in self:
+            return self[domain_id]['driver']
+
+    def get_domain_conf(self, domain_id):
+        if domain_id in self:
+            return self[domain_id]['cfg']
+        else:
+            return CONF
+
+    def reload_domain_driver(self, domain_id):
+        # Only used to support unit tests that want to set
+        # new config values.  This should only be called once
+        # the domains have been configured, since it relies on
+        # the fact that the configuration files/database have already been
+        # read.
+        if self.configured:
+            if domain_id in self:
+                self[domain_id]['driver'] = (
+                    self._load_driver(self[domain_id]))
+            else:
+                # The standard driver
+                self.driver = self.driver()
+
+
+def domains_configured(f):
+    """Wraps API calls to lazy load domain configs after init.
+
+    This is required since the assignment manager needs to be initialized
+    before this manager, and yet this manager's init wants to be
+    able to make assignment calls (to build the domain configs).  So
+    instead, we check if the domains have been initialized on entry
+    to each call and, if required, load them.
+
+    """
+    @functools.wraps(f)
+    def wrapper(self, *args, **kwargs):
+        if (not self.domain_configs.configured and
+                CONF.identity.domain_specific_drivers_enabled):
+            self.domain_configs.setup_domain_drivers(
+                self.driver, self.resource_api)
+        return f(self, *args, **kwargs)
+    return wrapper
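+
+# Illustrative effect: when CONF.identity.domain_specific_drivers_enabled is
+# set, the first call to any @domains_configured-decorated Manager method
+# below triggers setup_domain_drivers(); subsequent calls see
+# domain_configs.configured == True and pass straight through.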
+
+
+def exception_translated(exception_type):
+    """Wraps API calls to map to correct exception."""
+
+    def _exception_translated(f):
+        @functools.wraps(f)
+        def wrapper(self, *args, **kwargs):
+            try:
+                return f(self, *args, **kwargs)
+            except exception.PublicIDNotFound as e:
+                if exception_type == 'user':
+                    raise exception.UserNotFound(user_id=str(e))
+                elif exception_type == 'group':
+                    raise exception.GroupNotFound(group_id=str(e))
+                elif exception_type == 'assertion':
+                    raise AssertionError(_('Invalid user / password'))
+                else:
+                    raise
+        return wrapper
+    return _exception_translated
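+
+# Illustrative usage (as applied to the Manager methods below):
+#
+#     @exception_translated('user')
+#     def get_user(self, user_id):
+#         ...
+#
+# A PublicIDNotFound raised inside the decorated call then surfaces as
+# UserNotFound, keeping mapping-layer exceptions out of the public API.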
+
+
+@dependency.provider('identity_api')
+@dependency.requires('assignment_api', 'credential_api', 'id_mapping_api',
+                     'resource_api', 'revoke_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Identity backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    This class also handles the support of domain specific backends, by using
+    the DomainConfigs class. The setup call for DomainConfigs is called
+    from within the @domains_configured wrapper in a lazy loading fashion
+    to get around the fact that we can't satisfy the assignment api it needs
+    from within our __init__() function since the assignment driver is not
+    itself yet initialized.
+
+    Each of the identity calls is pre-processed here to choose, based on
+    domain, which of the drivers should be called. The non-domain-specific
+    driver is still in place, and is used if there is no specific driver for
+    the domain in question (or we are not using multiple domain drivers).
+
+    Starting with Juno, in order to be able to obtain the domain from
+    just an ID being presented as part of an API call, a public ID to domain
+    and local ID mapping is maintained.  This mapping also allows for the local
+    ID of drivers that do not provide simple UUIDs (such as LDAP) to be
+    referenced via a public facing ID.  The mapping itself is automatically
+    generated as entities are accessed via the driver.
+
+    This mapping is only used when:
+    - the entity is being handled by anything other than the default driver, or
+    - the entity is being handled by the default LDAP driver and backward
+      compatible IDs are not required.
+
+    This means that in the standard case of a single SQL backend or the default
+    settings of a single LDAP backend (since backward compatible IDs are
+    enabled by default), no mapping is used. An alternative approach would be
+    to always use the mapping table, simply making the public and local IDs
+    the same in the cases where a mapping is not needed. It is felt that not
+    using the
+    mapping by default is a more prudent way to introduce this functionality.
+
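+    For example (hypothetical values): a user stored in a domain specific
+    LDAP backend with local ID 'cn=alice,ou=users' is exposed through the
+    API under an opaque public ID generated by the id_mapping_api, while a
+    user in a single default SQL backend keeps its UUID as its public ID.
+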
+    """
+    _USER = 'user'
+    _GROUP = 'group'
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.identity.driver)
+        self.domain_configs = DomainConfigs()
+
+        self.event_callbacks = {
+            notifications.ACTIONS.deleted: {
+                'domain': [self._domain_deleted],
+            },
+        }
+
+    def _domain_deleted(self, service, resource_type, operation,
+                        payload):
+        domain_id = payload['resource_info']
+
+        user_refs = self.list_users(domain_scope=domain_id)
+        group_refs = self.list_groups(domain_scope=domain_id)
+
+        for group in group_refs:
+            # Cleanup any existing groups.
+            try:
+                self.delete_group(group['id'])
+            except exception.GroupNotFound:
+                LOG.debug(('Group %(groupid)s not found when deleting domain '
+                           'contents for %(domainid)s, continuing with '
+                           'cleanup.'),
+                          {'groupid': group['id'], 'domainid': domain_id})
+
+        # And finally, delete the users themselves
+        for user in user_refs:
+            try:
+                self.delete_user(user['id'])
+            except exception.UserNotFound:
+                LOG.debug(('User %(userid)s not found when deleting domain '
+                           'contents for %(domainid)s, continuing with '
+                           'cleanup.'),
+                          {'userid': user['id'], 'domainid': domain_id})
+
+    # Domain ID normalization methods
+
+    def _set_domain_id_and_mapping(self, ref, domain_id, driver,
+                                   entity_type):
+        """Patch the domain_id/public_id into the resulting entity(ies).
+
+        :param ref: the entity or list of entities to post process
+        :param domain_id: the domain scope used for the call
+        :param driver: the driver used to execute the call
+        :param entity_type: whether this is a user or group
+
+        :returns: post-processed entity or list of entities
+
+        Called to post-process the entity being returned, using a mapping
+        to substitute a public facing ID as necessary. This method must
+        take into account:
+
+        - If the driver is not domain aware, then we must set the domain
+          attribute of all entities irrespective of mapping.
+        - If the driver does not support UUIDs, then we always want to provide
+          a mapping, except for the special case of this being the default
+          driver and backward_compatible_ids is set to True. This is to ensure
+          that entity IDs do not change for an existing LDAP installation (only
+          single domain/driver LDAP configurations were previously supported).
+        - If the driver does support UUIDs, then we always create a mapping
+          entry, but use the local UUID as the public ID.  The exception to
+          this is that if we just have a single driver (i.e. we are not
+          using domain specific configs), then we don't bother with the
+          mapping at all.
+
+        """
+        conf = CONF.identity
+
+        if not self._needs_post_processing(driver):
+            # a classic case would be when running with a single SQL driver
+            return ref
+
+        LOG.debug('ID Mapping - Domain ID: %(domain)s, '
+                  'Default Driver: %(driver)s, '
+                  'Domains: %(aware)s, UUIDs: %(generate)s, '
+                  'Compatible IDs: %(compat)s',
+                  {'domain': domain_id,
+                   'driver': (driver == self.driver),
+                   'aware': driver.is_domain_aware(),
+                   'generate': driver.generates_uuids(),
+                   'compat': CONF.identity_mapping.backward_compatible_ids})
+
+        if isinstance(ref, dict):
+            return self._set_domain_id_and_mapping_for_single_ref(
+                ref, domain_id, driver, entity_type, conf)
+        elif isinstance(ref, list):
+            return [self._set_domain_id_and_mapping(
+                    x, domain_id, driver, entity_type) for x in ref]
+        else:
+            raise ValueError(_('Expected dict or list: %s') % type(ref))
+
+    def _needs_post_processing(self, driver):
+        """Returns whether entity from driver needs domain added or mapping."""
+        return (driver is not self.driver or not driver.generates_uuids() or
+                not driver.is_domain_aware())
+
+    def _set_domain_id_and_mapping_for_single_ref(self, ref, domain_id,
+                                                  driver, entity_type, conf):
+        LOG.debug('Local ID: %s', ref['id'])
+        ref = ref.copy()
+
+        self._insert_domain_id_if_needed(ref, driver, domain_id, conf)
+
+        if self._is_mapping_needed(driver):
+            local_entity = {'domain_id': ref['domain_id'],
+                            'local_id': ref['id'],
+                            'entity_type': entity_type}
+            public_id = self.id_mapping_api.get_public_id(local_entity)
+            if public_id:
+                ref['id'] = public_id
+                LOG.debug('Found existing mapping to public ID: %s',
+                          ref['id'])
+            else:
+                # Need to create a mapping. If the driver generates UUIDs
+                # then pass the local UUID in as the public ID to use.
+                if driver.generates_uuids():
+                    public_id = ref['id']
+                ref['id'] = self.id_mapping_api.create_id_mapping(
+                    local_entity, public_id)
+                LOG.debug('Created new mapping to public ID: %s',
+                          ref['id'])
+        return ref
+
+    def _insert_domain_id_if_needed(self, ref, driver, domain_id, conf):
+        """Inserts the domain ID into the ref, if required.
+
+        If the driver can't handle domains, then we need to insert the
+        domain_id into the entity being returned.  If the domain_id is
+        None, that means we are running in single backend mode, so to
+        remain backwardly compatible, we put in the default domain ID.
+        """
+        if not driver.is_domain_aware():
+            if domain_id is None:
+                domain_id = conf.default_domain_id
+            ref['domain_id'] = domain_id
+
+    def _is_mapping_needed(self, driver):
+        """Returns whether mapping is needed.
+
+        There are two situations where we must use the mapping:
+        - this isn't the default driver (i.e. multiple backends), or
+        - we have a single backend that doesn't use UUIDs.
+
+        The exception to the above is that we must honor backward
+        compatibility if this is the default driver (e.g. to support
+        current LDAP).
+        """
+        is_not_default_driver = driver is not self.driver
+        return (is_not_default_driver or (
+            not driver.generates_uuids() and
+            not CONF.identity_mapping.backward_compatible_ids))
+
+    def _clear_domain_id_if_domain_unaware(self, driver, ref):
+        """Clear domain_id details if driver is not domain aware."""
+        if not driver.is_domain_aware() and 'domain_id' in ref:
+            ref = ref.copy()
+            ref.pop('domain_id')
+        return ref
+
+    def _select_identity_driver(self, domain_id):
+        """Choose a backend driver for the given domain_id.
+
+        :param domain_id: The domain_id for which we want to find a driver.  If
+                          the domain_id is specified as None, then this means
+                          we need a driver that handles multiple domains.
+
+        :returns: chosen backend driver
+
+        If there is a specific driver defined for this domain then choose it.
+        If the domain is None, or no specific backend for the given domain
+        is found, then we choose the default driver.
+
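+        For example (hypothetical IDs): domain_id=None yields the default
+        driver, while domain_id='d1' yields the driver loaded from d1's
+        specific configuration if one exists, and the default driver
+        otherwise.
+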
+        """
+        if domain_id is None:
+            driver = self.driver
+        else:
+            driver = (self.domain_configs.get_domain_driver(domain_id) or
+                      self.driver)
+
+        # If the driver is not domain aware (e.g. LDAP) then check to
+        # ensure we are not mapping multiple domains onto it - the only way
+        # that would happen is that the default driver is LDAP and the
+        # domain is anything other than None or the default domain.
+        if (not driver.is_domain_aware() and driver == self.driver and
+            domain_id != CONF.identity.default_domain_id and
+                domain_id is not None):
+            LOG.warning('Found multiple domains being mapped to a '
+                        'driver that does not support that (e.g. '
+                        'LDAP) - Domain ID: %(domain)s, '
+                        'Default Driver: %(driver)s',
+                        {'domain': domain_id,
+                         'driver': (driver == self.driver)})
+            raise exception.DomainNotFound(domain_id=domain_id)
+        return driver
+
+    def _get_domain_driver_and_entity_id(self, public_id):
+        """Look up details using the public ID.
+
+        :param public_id: the ID provided in the call
+
+        :returns: a tuple of (domain_id, driver, entity_id), where domain_id
+                  may be None to indicate that the driver in question
+                  supports multiple domains, driver is selected based on
+                  that domain, and entity_id is the ID understood by the
+                  driver.
+
+        Use the mapping table to look up the domain, driver and local entity
+        that is represented by the provided public ID.  Handle the situations
+        where we do not use the mapping (e.g. a single driver that
+        understands
+        UUIDs etc.)
+
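+        Illustrative outcomes (hypothetical IDs):
+
+        - mapping hit for a domain specific driver:
+          ('d1', <d1's driver>, 'cn=alice,ou=users')
+        - default driver that generates UUIDs and is domain aware:
+          (None, self.driver, public_id)
+        - default non-UUID driver with backward compatible IDs enabled:
+          (conf.default_domain_id, self.driver, public_id)
+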
+        """
+        conf = CONF.identity
+        # First, since we don't know anything about the entity yet, we must
+        # assume it needs mapping, so long as we are using domain specific
+        # drivers.
+        if conf.domain_specific_drivers_enabled:
+            local_id_ref = self.id_mapping_api.get_id_mapping(public_id)
+            if local_id_ref:
+                return (
+                    local_id_ref['domain_id'],
+                    self._select_identity_driver(local_id_ref['domain_id']),
+                    local_id_ref['local_id'])
+
+        # So either we are using multiple drivers but the public ID is invalid
+        # (and hence was not found in the mapping table), or the public ID is
+        # being handled by the default driver.  Either way, the only place left
+        # to look is in that standard driver. However, we don't yet know if
+        # this driver also needs mapping (e.g. LDAP in non backward
+        # compatibility mode).
+        driver = self.driver
+        if driver.generates_uuids():
+            if driver.is_domain_aware():
+                # No mapping required, and the driver can handle the domain
+                # information itself.  The classic case of this is the
+                # current SQL driver.
+                return (None, driver, public_id)
+            else:
+                # Although we don't have any drivers of this type, i.e. that
+                # understand UUIDs but not domains, conceptually you could.
+                return (conf.default_domain_id, driver, public_id)
+
+        # So the only place left to find the ID is in the default driver which
+        # we now know doesn't generate UUIDs
+        if not CONF.identity_mapping.backward_compatible_ids:
+            # We are not running in backward compatibility mode, so we
+            # must use a mapping.
+            local_id_ref = self.id_mapping_api.get_id_mapping(public_id)
+            if local_id_ref:
+                return (
+                    local_id_ref['domain_id'],
+                    driver,
+                    local_id_ref['local_id'])
+            else:
+                raise exception.PublicIDNotFound(id=public_id)
+
+        # If we reach here, this means that the default driver
+        # requires no mapping - but also doesn't understand domains
+        # (e.g. the classic single LDAP driver situation). Hence we pass
+        # back the public_ID unmodified and use the default domain (to
+        # keep backwards compatibility with existing installations).
+        #
+        # It is still possible that the public ID is just invalid in
+        # which case we leave this to the caller to check.
+        return (conf.default_domain_id, driver, public_id)
+
+    def _assert_user_and_group_in_same_backend(
+            self, user_entity_id, user_driver, group_entity_id, group_driver):
+        """Ensures that user and group IDs are backed by the same backend.
+
+        Raise a CrossBackendNotAllowed exception if they are not from the same
+        backend, otherwise return None.
+
+        """
+        if user_driver is not group_driver:
+            # First determine whether either ID does not exist by calling
+            # the driver get methods (which will raise a NotFound
+            # exception if so).
+            user_driver.get_user(user_entity_id)
+            group_driver.get_group(group_entity_id)
+            # If we get here, then someone is attempting to create a cross
+            # backend membership, which is not allowed.
+            raise exception.CrossBackendNotAllowed(group_id=group_entity_id,
+                                                   user_id=user_entity_id)
+
+    def _mark_domain_id_filter_satisfied(self, hints):
+        if hints:
+            # Iterate over a copy, since we remove from the list in place.
+            for filter_ in list(hints.filters):
+                if (filter_['name'] == 'domain_id' and
+                        filter_['comparator'] == 'equals'):
+                    hints.filters.remove(filter_)
+
+    def _ensure_domain_id_in_hints(self, hints, domain_id):
+        if (domain_id is not None and
+                not hints.get_exact_filter_by_name('domain_id')):
+            hints.add_filter('domain_id', domain_id)
+
+    # The actual driver calls - these are pre/post processed here as
+    # part of the Manager layer to make sure we:
+    #
+    # - select the right driver for this domain
+    # - clear/set domain_ids for drivers that do not support domains
+    # - create any ID mapping that might be required
+
+    @notifications.emit_event('authenticate')
+    @domains_configured
+    @exception_translated('assertion')
+    def authenticate(self, context, user_id, password):
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(user_id))
+        ref = driver.authenticate(entity_id, password)
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.USER)
+
+    @domains_configured
+    @exception_translated('user')
+    def create_user(self, user_ref, initiator=None):
+        user = user_ref.copy()
+        user['name'] = clean.user_name(user['name'])
+        user.setdefault('enabled', True)
+        user['enabled'] = clean.user_enabled(user['enabled'])
+        domain_id = user['domain_id']
+        self.resource_api.get_domain(domain_id)
+
+        # For creating a user, the domain is in the object itself
+        domain_id = user_ref['domain_id']
+        driver = self._select_identity_driver(domain_id)
+        user = self._clear_domain_id_if_domain_unaware(driver, user)
+        # Generate a local ID - in the future this might become a function of
+        # the underlying driver so that it could conform to rules set down by
+        # that particular driver type.
+        user['id'] = uuid.uuid4().hex
+        ref = driver.create_user(user['id'], user)
+        notifications.Audit.created(self._USER, user['id'], initiator)
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.USER)
+
+    @domains_configured
+    @exception_translated('user')
+    @MEMOIZE
+    def get_user(self, user_id):
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(user_id))
+        ref = driver.get_user(entity_id)
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.USER)
+
+    def assert_user_enabled(self, user_id, user=None):
+        """Assert the user and the user's domain are enabled.
+
+        :raises AssertionError: if the user or the user's domain is disabled.
+        """
+        if user is None:
+            user = self.get_user(user_id)
+        self.resource_api.assert_domain_enabled(user['domain_id'])
+        if not user.get('enabled', True):
+            raise AssertionError(_('User is disabled: %s') % user_id)
+
+    @domains_configured
+    @exception_translated('user')
+    @MEMOIZE
+    def get_user_by_name(self, user_name, domain_id):
+        driver = self._select_identity_driver(domain_id)
+        ref = driver.get_user_by_name(user_name, domain_id)
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.USER)
+
+    @manager.response_truncated
+    @domains_configured
+    @exception_translated('user')
+    def list_users(self, domain_scope=None, hints=None):
+        driver = self._select_identity_driver(domain_scope)
+        hints = hints or driver_hints.Hints()
+        if driver.is_domain_aware():
+            # Force the domain_scope into the hint to ensure that we only get
+            # back domains for that scope.
+            self._ensure_domain_id_in_hints(hints, domain_scope)
+        else:
+            # We are effectively satisfying any domain_id filter by the above
+            # driver selection, so remove any such filter.
+            self._mark_domain_id_filter_satisfied(hints)
+        ref_list = driver.list_users(hints)
+        return self._set_domain_id_and_mapping(
+            ref_list, domain_scope, driver, mapping.EntityType.USER)
+
+    @domains_configured
+    @exception_translated('user')
+    def update_user(self, user_id, user_ref, initiator=None):
+        old_user_ref = self.get_user(user_id)
+        user = user_ref.copy()
+        if 'name' in user:
+            user['name'] = clean.user_name(user['name'])
+        if 'enabled' in user:
+            user['enabled'] = clean.user_enabled(user['enabled'])
+        if 'domain_id' in user:
+            self.resource_api.get_domain(user['domain_id'])
+        if 'id' in user:
+            if user_id != user['id']:
+                raise exception.ValidationError(_('Cannot change user ID'))
+            # Since any ID in the user dict is now irrelevant, remove it so
+            # that the driver layer won't be confused by the fact that this
+            # is the public ID, not the local ID.
+            user.pop('id')
+
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(user_id))
+        user = self._clear_domain_id_if_domain_unaware(driver, user)
+        self.get_user.invalidate(self, old_user_ref['id'])
+        self.get_user_by_name.invalidate(self, old_user_ref['name'],
+                                         old_user_ref['domain_id'])
+
+        ref = driver.update_user(entity_id, user)
+
+        notifications.Audit.updated(self._USER, user_id, initiator)
+
+        enabled_change = ((user.get('enabled') is False) and
+                          user['enabled'] != old_user_ref.get('enabled'))
+        if enabled_change or user.get('password') is not None:
+            self.emit_invalidate_user_token_persistence(user_id)
+
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.USER)
+
+    @domains_configured
+    @exception_translated('user')
+    def delete_user(self, user_id, initiator=None):
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(user_id))
+        # Get user details to invalidate the cache.
+        user_old = self.get_user(user_id)
+        driver.delete_user(entity_id)
+        self.assignment_api.delete_user(user_id)
+        self.get_user.invalidate(self, user_id)
+        self.get_user_by_name.invalidate(self, user_old['name'],
+                                         user_old['domain_id'])
+        self.credential_api.delete_credentials_for_user(user_id)
+        self.id_mapping_api.delete_id_mapping(user_id)
+        notifications.Audit.deleted(self._USER, user_id, initiator)
+
+    @domains_configured
+    @exception_translated('group')
+    def create_group(self, group_ref, initiator=None):
+        group = group_ref.copy()
+        group.setdefault('description', '')
+        domain_id = group['domain_id']
+        self.resource_api.get_domain(domain_id)
+
+        # For creating a group, the domain is in the object itself
+        domain_id = group_ref['domain_id']
+        driver = self._select_identity_driver(domain_id)
+        group = self._clear_domain_id_if_domain_unaware(driver, group)
+        # Generate a local ID - in the future this might become a function of
+        # the underlying driver so that it could conform to rules set down by
+        # that particular driver type.
+        group['id'] = uuid.uuid4().hex
+        ref = driver.create_group(group['id'], group)
+
+        notifications.Audit.created(self._GROUP, group['id'], initiator)
+
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.GROUP)
+
+    @domains_configured
+    @exception_translated('group')
+    @MEMOIZE
+    def get_group(self, group_id):
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(group_id))
+        ref = driver.get_group(entity_id)
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.GROUP)
+
+    @domains_configured
+    @exception_translated('group')
+    def get_group_by_name(self, group_name, domain_id):
+        driver = self._select_identity_driver(domain_id)
+        ref = driver.get_group_by_name(group_name, domain_id)
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.GROUP)
+
+    @domains_configured
+    @exception_translated('group')
+    def update_group(self, group_id, group, initiator=None):
+        if 'domain_id' in group:
+            self.resource_api.get_domain(group['domain_id'])
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(group_id))
+        group = self._clear_domain_id_if_domain_unaware(driver, group)
+        ref = driver.update_group(entity_id, group)
+        self.get_group.invalidate(self, group_id)
+        notifications.Audit.updated(self._GROUP, group_id, initiator)
+        return self._set_domain_id_and_mapping(
+            ref, domain_id, driver, mapping.EntityType.GROUP)
+
+    @domains_configured
+    @exception_translated('group')
+    def delete_group(self, group_id, initiator=None):
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(group_id))
+        user_ids = (u['id'] for u in self.list_users_in_group(group_id))
+        driver.delete_group(entity_id)
+        self.get_group.invalidate(self, group_id)
+        self.id_mapping_api.delete_id_mapping(group_id)
+        self.assignment_api.delete_group(group_id)
+
+        notifications.Audit.deleted(self._GROUP, group_id, initiator)
+
+        for uid in user_ids:
+            self.emit_invalidate_user_token_persistence(uid)
+
+    @domains_configured
+    @exception_translated('group')
+    def add_user_to_group(self, user_id, group_id):
+        @exception_translated('user')
+        def get_entity_info_for_user(public_id):
+            return self._get_domain_driver_and_entity_id(public_id)
+
+        _domain_id, group_driver, group_entity_id = (
+            self._get_domain_driver_and_entity_id(group_id))
+        # Get the same info for the user_id, taking care to map any
+        # exceptions correctly
+        _domain_id, user_driver, user_entity_id = (
+            get_entity_info_for_user(user_id))
+
+        self._assert_user_and_group_in_same_backend(
+            user_entity_id, user_driver, group_entity_id, group_driver)
+
+        group_driver.add_user_to_group(user_entity_id, group_entity_id)
+
+    @domains_configured
+    @exception_translated('group')
+    def remove_user_from_group(self, user_id, group_id):
+        @exception_translated('user')
+        def get_entity_info_for_user(public_id):
+            return self._get_domain_driver_and_entity_id(public_id)
+
+        _domain_id, group_driver, group_entity_id = (
+            self._get_domain_driver_and_entity_id(group_id))
+        # Get the same info for the user_id, taking care to map any
+        # exceptions correctly
+        _domain_id, user_driver, user_entity_id = (
+            get_entity_info_for_user(user_id))
+
+        self._assert_user_and_group_in_same_backend(
+            user_entity_id, user_driver, group_entity_id, group_driver)
+
+        group_driver.remove_user_from_group(user_entity_id, group_entity_id)
+        self.emit_invalidate_user_token_persistence(user_id)
+
+    @notifications.internal(notifications.INVALIDATE_USER_TOKEN_PERSISTENCE)
+    def emit_invalidate_user_token_persistence(self, user_id):
+        """Emit a notification to the callback system to revoke user tokens.
+
+        This method and associated callback listener removes the need for
+        making a direct call to another manager to delete and revoke tokens.
+
+        :param user_id: user identifier
+        :type user_id: string
+        """
+        pass
+
+    @manager.response_truncated
+    @domains_configured
+    @exception_translated('user')
+    def list_groups_for_user(self, user_id, hints=None):
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(user_id))
+        hints = hints or driver_hints.Hints()
+        if not driver.is_domain_aware():
+            # We are effectively satisfying any domain_id filter by the above
+            # driver selection, so remove any such filter
+            self._mark_domain_id_filter_satisfied(hints)
+        ref_list = driver.list_groups_for_user(entity_id, hints)
+        return self._set_domain_id_and_mapping(
+            ref_list, domain_id, driver, mapping.EntityType.GROUP)
+
+    @manager.response_truncated
+    @domains_configured
+    @exception_translated('group')
+    def list_groups(self, domain_scope=None, hints=None):
+        driver = self._select_identity_driver(domain_scope)
+        hints = hints or driver_hints.Hints()
+        if driver.is_domain_aware():
+            # Force the domain_scope into the hint to ensure that we only get
+            # back domains for that scope.
+            self._ensure_domain_id_in_hints(hints, domain_scope)
+        else:
+            # We are effectively satisfying any domain_id filter by the above
+            # driver selection, so remove any such filter.
+            self._mark_domain_id_filter_satisfied(hints)
+        ref_list = driver.list_groups(hints)
+        return self._set_domain_id_and_mapping(
+            ref_list, domain_scope, driver, mapping.EntityType.GROUP)
+
+    @manager.response_truncated
+    @domains_configured
+    @exception_translated('group')
+    def list_users_in_group(self, group_id, hints=None):
+        domain_id, driver, entity_id = (
+            self._get_domain_driver_and_entity_id(group_id))
+        hints = hints or driver_hints.Hints()
+        if not driver.is_domain_aware():
+            # We are effectively satisfying any domain_id filter by the above
+            # driver selection, so remove any such filter
+            self._mark_domain_id_filter_satisfied(hints)
+        ref_list = driver.list_users_in_group(entity_id, hints)
+        return self._set_domain_id_and_mapping(
+            ref_list, domain_id, driver, mapping.EntityType.USER)
+
+    @domains_configured
+    @exception_translated('group')
+    def check_user_in_group(self, user_id, group_id):
+        @exception_translated('user')
+        def get_entity_info_for_user(public_id):
+            return self._get_domain_driver_and_entity_id(public_id)
+
+        _domain_id, group_driver, group_entity_id = (
+            self._get_domain_driver_and_entity_id(group_id))
+        # Get the same info for the user_id, taking care to map any
+        # exceptions correctly
+        _domain_id, user_driver, user_entity_id = (
+            get_entity_info_for_user(user_id))
+
+        self._assert_user_and_group_in_same_backend(
+            user_entity_id, user_driver, group_entity_id, group_driver)
+
+        return group_driver.check_user_in_group(user_entity_id,
+                                                group_entity_id)
+
+    @domains_configured
+    def change_password(self, context, user_id, original_password,
+                        new_password):
+
+        # authenticate() will raise an AssertionError if authentication fails
+        self.authenticate(context, user_id, original_password)
+
+        update_dict = {'password': new_password}
+        self.update_user(user_id, update_dict)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    """Interface description for an Identity driver."""
+
+    def _get_list_limit(self):
+        return CONF.identity.list_limit or CONF.list_limit
+
+    def is_domain_aware(self):
+        """Indicates if Driver supports domains."""
+        return True
+
+    @property
+    def is_sql(self):
+        """Indicates if this Driver uses SQL."""
+        return False
+
+    @property
+    def multiple_domains_supported(self):
+        return (self.is_domain_aware() or
+                CONF.identity.domain_specific_drivers_enabled)
+
+    def generates_uuids(self):
+        """Indicates if Driver generates UUIDs as the local entity ID."""
+        return True
+
+    @abc.abstractmethod
+    def authenticate(self, user_id, password):
+        """Authenticate a given user and password.
+        :returns: user_ref
+        :raises: AssertionError
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # user crud
+
+    @abc.abstractmethod
+    def create_user(self, user_id, user):
+        """Creates a new user.
+
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_users(self, hints):
+        """List users in the system.
+
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+
+        :returns: a list of user_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_users_in_group(self, group_id, hints):
+        """List users in a group.
+
+        :param group_id: the group in question
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+
+        :returns: a list of user_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_user(self, user_id):
+        """Get a user by ID.
+
+        :returns: user_ref
+        :raises: keystone.exception.UserNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_user(self, user_id, user):
+        """Updates an existing user.
+
+        :raises: keystone.exception.UserNotFound,
+                 keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def add_user_to_group(self, user_id, group_id):
+        """Adds a user to a group.
+
+        :raises: keystone.exception.UserNotFound,
+                 keystone.exception.GroupNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def check_user_in_group(self, user_id, group_id):
+        """Checks if a user is a member of a group.
+
+        :raises: keystone.exception.UserNotFound,
+                 keystone.exception.GroupNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def remove_user_from_group(self, user_id, group_id):
+        """Removes a user from a group.
+
+        :raises: keystone.exception.NotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_user(self, user_id):
+        """Deletes an existing user.
+
+        :raises: keystone.exception.UserNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_user_by_name(self, user_name, domain_id):
+        """Get a user by name.
+
+        :returns: user_ref
+        :raises: keystone.exception.UserNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # group crud
+
+    @abc.abstractmethod
+    def create_group(self, group_id, group):
+        """Creates a new group.
+
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_groups(self, hints):
+        """List groups in the system.
+
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+
+        :returns: a list of group_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_groups_for_user(self, user_id, hints):
+        """List groups a user is in
+
+        :param user_id: the user in question
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+
+        :returns: a list of group_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_group(self, group_id):
+        """Get a group by ID.
+
+        :returns: group_ref
+        :raises: keystone.exception.GroupNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_group_by_name(self, group_name, domain_id):
+        """Get a group by name.
+
+        :returns: group_ref
+        :raises: keystone.exception.GroupNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_group(self, group_id, group):
+        """Updates an existing group.
+
+        :raises: keystone.exception.GroupNotFound,
+                 keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_group(self, group_id):
+        """Deletes an existing group.
+
+        :raises: keystone.exception.GroupNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # end of identity
+
+
+@dependency.provider('id_mapping_api')
+class MappingManager(manager.Manager):
+    """Default pivot point for the ID Mapping backend."""
+
+    def __init__(self):
+        super(MappingManager, self).__init__(CONF.identity_mapping.driver)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class MappingDriver(object):
+    """Interface description for an ID Mapping driver."""
+
+    @abc.abstractmethod
+    def get_public_id(self, local_entity):
+        """Returns the public ID for the given local entity.
+
+        :param dict local_entity: Containing the entity domain, local ID and
+                                  type ('user' or 'group').
+        :returns: public ID, or None if no mapping is found.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_id_mapping(self, public_id):
+        """Returns the local mapping.
+
+        :param public_id: The public ID for the mapping required.
+        :returns: dict containing the entity domain, local ID and type, or
+                  None if no mapping is found.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_id_mapping(self, local_entity, public_id=None):
+        """Create and store a mapping to a public_id.
+
+        :param dict local_entity: Containing the entity domain, local ID and
+                                  type ('user' or 'group').
+        :param public_id: If specified, this will be the public ID.  If this
+                          is not specified, a public ID will be generated.
+        :returns: public ID
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_id_mapping(self, public_id):
+        """Deletes an entry for the given public_id.
+
+        :param public_id: The public ID for the mapping to be deleted.
+
+        The method is silent if no mapping is found.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def purge_mappings(self, purge_filter):
+        """Purge selected identity mappings.
+
+        :param dict purge_filter: Containing the attributes of the filter that
+                                  defines which entries to purge. An empty
+                                  filter means purge all mappings.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
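
For orientation, a minimal in-memory sketch of the MappingDriver interface above. This is a hypothetical illustration (the DictMapping name and dict-backed storage are assumptions, not part of keystone), useful mainly as a test double:

    import uuid

    from keystone import identity


    class DictMapping(identity.MappingDriver):
        """Hypothetical in-memory mapping driver (illustration only)."""

        def __init__(self):
            self._mappings = {}  # public_id -> local_entity dict

        @staticmethod
        def _key(local_entity):
            return (local_entity['domain_id'],
                    local_entity['local_id'],
                    local_entity['entity_type'])

        def get_public_id(self, local_entity):
            wanted = self._key(local_entity)
            for public_id, entity in self._mappings.items():
                if self._key(entity) == wanted:
                    return public_id
            return None  # no mapping found

        def get_id_mapping(self, public_id):
            return self._mappings.get(public_id)

        def create_id_mapping(self, local_entity, public_id=None):
            public_id = public_id or uuid.uuid4().hex
            self._mappings[public_id] = dict(local_entity)
            return public_id

        def delete_id_mapping(self, public_id):
            # Silent when no mapping is found, as the interface requires.
            self._mappings.pop(public_id, None)

        def purge_mappings(self, purge_filter):
            # An empty filter purges everything; attributes combine with AND.
            doomed = [p for p, e in self._mappings.items()
                      if all(dict(e, public_id=p).get(k) == v
                             for k, v in purge_filter.items())]
            for public_id in doomed:
                del self._mappings[public_id]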
diff --git a/keystone-moon/keystone/identity/generator.py b/keystone-moon/keystone/identity/generator.py
new file mode 100644 (file)
index 0000000..d25426c
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""ID Generator provider interface."""
+
+import abc
+
+from oslo_config import cfg
+import six
+
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+
+CONF = cfg.CONF
+
+
+@dependency.provider('id_generator_api')
+class Manager(manager.Manager):
+    """Default pivot point for the identifier generator backend."""
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.identity_mapping.generator)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class IDGenerator(object):
+    """Interface description for an ID Generator provider."""
+
+    @abc.abstractmethod
+    def generate_public_ID(self, mapping):
+        """Return a Public ID for the given mapping dict.
+
+        :param dict mapping: The items to be hashed.
+
+        The ID must be reproducible and no more than 64 chars in length.
+        The ID generated should be independent of the order of the items
+        in the mapping dict.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
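
As a hedged sketch of what a provider implementing this interface might look like (the stock sha256 provider follows below; the Sha1Generator name here is hypothetical):

    import hashlib

    from keystone.identity import generator


    class Sha1Generator(generator.IDGenerator):
        """Hypothetical provider honouring the contract above."""

        def generate_public_ID(self, mapping):
            # Sorting the keys makes the digest independent of item order.
            m = hashlib.sha1()
            for key in sorted(mapping):
                m.update(mapping[key].encode('utf-8'))
            return m.hexdigest()  # 40 hex chars, within the 64-char limit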
diff --git a/keystone-moon/keystone/identity/id_generators/__init__.py b/keystone-moon/keystone/identity/id_generators/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/identity/id_generators/sha256.py b/keystone-moon/keystone/identity/id_generators/sha256.py
new file mode 100644 (file)
index 0000000..e3a8b41
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+
+import six
+
+from keystone.identity import generator
+
+
+class Generator(generator.IDGenerator):
+
+    def generate_public_ID(self, mapping):
+        m = hashlib.sha256()
+        for key in sorted(six.iterkeys(mapping)):
+            m.update(mapping[key].encode('utf-8'))
+        return m.hexdigest()
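
A brief usage sketch (values hypothetical) confirming the properties the interface requires of this generator: reproducible, independent of item order, and within the 64-character limit:

    from keystone.identity.id_generators import sha256

    gen = sha256.Generator()
    a = gen.generate_public_ID({'domain_id': 'default',
                                'local_id': 'alice',
                                'entity_type': 'user'})
    b = gen.generate_public_ID({'entity_type': 'user',
                                'local_id': 'alice',
                                'domain_id': 'default'})
    assert a == b        # same items in any order yield the same public ID
    assert len(a) == 64  # a sha256 hexdigest exactly fills the 64-char limit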
diff --git a/keystone-moon/keystone/identity/mapping_backends/__init__.py b/keystone-moon/keystone/identity/mapping_backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/identity/mapping_backends/mapping.py b/keystone-moon/keystone/identity/mapping_backends/mapping.py
new file mode 100644 (file)
index 0000000..dddf36c
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class EntityType(object):
+    USER = 'user'
+    GROUP = 'group'
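
For context, a local_entity dict as the mapping backends expect it, built with these constants (the values are hypothetical):

    from keystone.identity.mapping_backends import mapping as identity_mapping

    local_entity = {
        'domain_id': 'default',
        'local_id': 'cn=alice,ou=users',  # e.g. an LDAP-local identifier
        'entity_type': identity_mapping.EntityType.USER,
    }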
diff --git a/keystone-moon/keystone/identity/mapping_backends/sql.py b/keystone-moon/keystone/identity/mapping_backends/sql.py
new file mode 100644 (file)
index 0000000..b2f9cb9
--- /dev/null
@@ -0,0 +1,97 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import dependency
+from keystone.common import sql
+from keystone import identity
+from keystone.identity.mapping_backends import mapping as identity_mapping
+
+
+class IDMapping(sql.ModelBase, sql.ModelDictMixin):
+    __tablename__ = 'id_mapping'
+    public_id = sql.Column(sql.String(64), primary_key=True)
+    domain_id = sql.Column(sql.String(64), nullable=False)
+    local_id = sql.Column(sql.String(64), nullable=False)
+    # NOTE(henry-nash): Postgres requires a name to be defined for an Enum.
+    entity_type = sql.Column(
+        sql.Enum(identity_mapping.EntityType.USER,
+                 identity_mapping.EntityType.GROUP,
+                 name='entity_type'),
+        nullable=False)
+    # Unique constraint to ensure you can't store more than one mapping for
+    # the same underlying values.
+    __table_args__ = (
+        sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'), {})
+
+
+@dependency.requires('id_generator_api')
+class Mapping(identity.MappingDriver):
+
+    def get_public_id(self, local_entity):
+        # NOTE(henry-nash): Since the public ID is regenerable, rather
+        # than search for the entry using the local entity values, we
+        # could create the hash and do a PK lookup.  However, this would only
+        # work if we hashed all the entries, even those that already generate
+        # UUIDs, like SQL.  Further, it would only work if the generation
+        # algorithm were immutable (e.g. it had always been sha256).
+        session = sql.get_session()
+        query = session.query(IDMapping.public_id)
+        query = query.filter_by(domain_id=local_entity['domain_id'])
+        query = query.filter_by(local_id=local_entity['local_id'])
+        query = query.filter_by(entity_type=local_entity['entity_type'])
+        try:
+            public_ref = query.one()
+            public_id = public_ref.public_id
+            return public_id
+        except sql.NotFound:
+            return None
+
+    def get_id_mapping(self, public_id):
+        session = sql.get_session()
+        mapping_ref = session.query(IDMapping).get(public_id)
+        if mapping_ref:
+            return mapping_ref.to_dict()
+
+    def create_id_mapping(self, local_entity, public_id=None):
+        entity = local_entity.copy()
+        with sql.transaction() as session:
+            if public_id is None:
+                public_id = self.id_generator_api.generate_public_ID(entity)
+            entity['public_id'] = public_id
+            mapping_ref = IDMapping.from_dict(entity)
+            session.add(mapping_ref)
+        return public_id
+
+    def delete_id_mapping(self, public_id):
+        with sql.transaction() as session:
+            try:
+                session.query(IDMapping).filter(
+                    IDMapping.public_id == public_id).delete()
+            except sql.NotFound:
+                # NOTE(morganfainberg): There is nothing to delete and nothing
+                # to do.
+                pass
+
+    def purge_mappings(self, purge_filter):
+        session = sql.get_session()
+        query = session.query(IDMapping)
+        if 'domain_id' in purge_filter:
+            query = query.filter_by(domain_id=purge_filter['domain_id'])
+        if 'public_id' in purge_filter:
+            query = query.filter_by(public_id=purge_filter['public_id'])
+        if 'local_id' in purge_filter:
+            query = query.filter_by(local_id=purge_filter['local_id'])
+        if 'entity_type' in purge_filter:
+            query = query.filter_by(entity_type=purge_filter['entity_type'])
+        query.delete()
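
A hedged usage sketch of the backend above, assuming an initialized keystone SQL session and a registered id_generator_api dependency (all values hypothetical):

    from keystone.identity.mapping_backends import mapping as identity_mapping
    from keystone.identity.mapping_backends import sql as mapping_sql

    driver = mapping_sql.Mapping()
    public_id = driver.create_id_mapping({
        'domain_id': 'default',
        'local_id': 'cn=alice,ou=users',
        'entity_type': identity_mapping.EntityType.USER,
    })
    assert driver.get_id_mapping(public_id)['local_id'] == 'cn=alice,ou=users'
    # Filter attributes combine with AND; an empty dict purges all mappings.
    driver.purge_mappings({'domain_id': 'default',
                           'entity_type': identity_mapping.EntityType.USER})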
diff --git a/keystone-moon/keystone/identity/routers.py b/keystone-moon/keystone/identity/routers.py
new file mode 100644 (file)
index 0000000..e274d6f
--- /dev/null
@@ -0,0 +1,84 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""WSGI Routers for the Identity service."""
+
+from keystone.common import json_home
+from keystone.common import router
+from keystone.common import wsgi
+from keystone.identity import controllers
+
+
+class Admin(wsgi.ComposableRouter):
+    def add_routes(self, mapper):
+        # User Operations
+        user_controller = controllers.User()
+        mapper.connect('/users/{user_id}',
+                       controller=user_controller,
+                       action='get_user',
+                       conditions=dict(method=['GET']))
+
+
+class Routers(wsgi.RoutersBase):
+
+    def append_v3_routers(self, mapper, routers):
+        user_controller = controllers.UserV3()
+        routers.append(
+            router.Router(user_controller,
+                          'users', 'user',
+                          resource_descriptions=self.v3_resources))
+
+        self._add_resource(
+            mapper, user_controller,
+            path='/users/{user_id}/password',
+            post_action='change_password',
+            rel=json_home.build_v3_resource_relation('user_change_password'),
+            path_vars={
+                'user_id': json_home.Parameters.USER_ID,
+            })
+
+        self._add_resource(
+            mapper, user_controller,
+            path='/groups/{group_id}/users',
+            get_action='list_users_in_group',
+            rel=json_home.build_v3_resource_relation('group_users'),
+            path_vars={
+                'group_id': json_home.Parameters.GROUP_ID,
+            })
+
+        self._add_resource(
+            mapper, user_controller,
+            path='/groups/{group_id}/users/{user_id}',
+            put_action='add_user_to_group',
+            get_head_action='check_user_in_group',
+            delete_action='remove_user_from_group',
+            rel=json_home.build_v3_resource_relation('group_user'),
+            path_vars={
+                'group_id': json_home.Parameters.GROUP_ID,
+                'user_id': json_home.Parameters.USER_ID,
+            })
+
+        group_controller = controllers.GroupV3()
+        routers.append(
+            router.Router(group_controller,
+                          'groups', 'group',
+                          resource_descriptions=self.v3_resources))
+
+        self._add_resource(
+            mapper, group_controller,
+            path='/users/{user_id}/groups',
+            get_action='list_groups_for_user',
+            rel=json_home.build_v3_resource_relation('user_groups'),
+            path_vars={
+                'user_id': json_home.Parameters.USER_ID,
+            })
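
For reference, the extra v3 routes the code above wires up (beyond the standard CRUD routers for users and groups), summarized as (method, path, controller action) tuples derived from the mappings in this file:

    IDENTITY_V3_EXTRA_ROUTES = [
        ('POST',   '/users/{user_id}/password',          'change_password'),
        ('GET',    '/groups/{group_id}/users',           'list_users_in_group'),
        ('PUT',    '/groups/{group_id}/users/{user_id}', 'add_user_to_group'),
        ('GET',    '/groups/{group_id}/users/{user_id}', 'check_user_in_group'),
        ('HEAD',   '/groups/{group_id}/users/{user_id}', 'check_user_in_group'),
        ('DELETE', '/groups/{group_id}/users/{user_id}', 'remove_user_from_group'),
        ('GET',    '/users/{user_id}/groups',            'list_groups_for_user'),
    ]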
diff --git a/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..8e4b677
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: German (http://www.transifex.com/projects/p/keystone/language/"
+"de/)\n"
+"Language: de\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "Vorlagendatei %s kann nicht geöffnet werden"
diff --git a/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-info.po
new file mode 100644 (file)
index 0000000..fdf84ad
--- /dev/null
@@ -0,0 +1,212 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: German (http://www.transifex.com/projects/p/keystone/language/"
+"de/)\n"
+"Language: de\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/assignment/core.py:250
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr ""
+
+#: keystone/assignment/core.py:258
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr ""
+
+#: keystone/auth/controllers.py:64
+msgid "Loading auth-plugins by class-name is deprecated."
+msgstr ""
+
+#: keystone/auth/controllers.py:106
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s.  Will use "
+"the earliest value."
+msgstr ""
+
+#: keystone/common/openssl.py:81
+#, python-format
+msgid "Running command - %s"
+msgstr ""
+
+#: keystone/common/wsgi.py:79
+msgid "No bind information present in token"
+msgstr ""
+
+#: keystone/common/wsgi.py:83
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr ""
+
+#: keystone/common/wsgi.py:90
+msgid "Kerberos credentials required and not present"
+msgstr ""
+
+#: keystone/common/wsgi.py:94
+msgid "Kerberos credentials do not match those in bind"
+msgstr ""
+
+#: keystone/common/wsgi.py:98
+msgid "Kerberos bind authentication successful"
+msgstr ""
+
+#: keystone/common/wsgi.py:105
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:103
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr "Starten von %(arg0)s auf %(host)s:%(port)s"
+
+#: keystone/common/kvs/core.py:138
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr ""
+
+#: keystone/common/kvs/core.py:188
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:200
+#, python-format
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:210
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr ""
+
+#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
+#, python-format
+msgid ""
+"Received the following notification: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+msgstr ""
+
+#: keystone/openstack/common/eventlet_backdoor.py:146
+#, python-format
+msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
+msgstr "Eventlet backdoor hört auf %(port)s für Prozess %(pid)d"
+
+#: keystone/openstack/common/service.py:173
+#, python-format
+msgid "Caught %s, exiting"
+msgstr "%s abgefangen. Vorgang wird beendet"
+
+#: keystone/openstack/common/service.py:231
+msgid "Parent process has died unexpectedly, exiting"
+msgstr ""
+"Übergeordneter Prozess wurde unerwartet abgebrochen. Vorgang wird beendet"
+
+#: keystone/openstack/common/service.py:262
+#, python-format
+msgid "Child caught %s, exiting"
+msgstr "Untergeordnetes Element %s abgefangen; Vorgang wird beendet"
+
+#: keystone/openstack/common/service.py:301
+msgid "Forking too fast, sleeping"
+msgstr "Verzweigung zu schnell; im Ruhemodus"
+
+#: keystone/openstack/common/service.py:320
+#, python-format
+msgid "Started child %d"
+msgstr "Untergeordnetes Element %d gestartet"
+
+#: keystone/openstack/common/service.py:330
+#, python-format
+msgid "Starting %d workers"
+msgstr "Starten von %d Workers"
+
+#: keystone/openstack/common/service.py:347
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr "Untergeordnetes Element %(pid)d durch Signal %(sig)d abgebrochen"
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "Child %(pid)s exited with status %(code)d"
+msgstr "Untergeordnete %(pid)s mit Status %(code)d beendet"
+
+#: keystone/openstack/common/service.py:390
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr "%s abgefangen, untergeordnete Elemente werden gestoppt"
+
+#: keystone/openstack/common/service.py:399
+msgid "Wait called after thread killed. Cleaning up."
+msgstr ""
+
+#: keystone/openstack/common/service.py:415
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr "Warten auf Beenden von %d untergeordneten Elementen"
+
+#: keystone/token/persistence/backends/sql.py:279
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:72
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:130
+#, python-format
+msgid "Created a new key: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:143
+msgid "Key repository is already initialized; aborting."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:179
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:185
+#, python-format
+msgid "Current primary key is: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:187
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:197
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:213
+#, python-format
+msgid "Excess keys to purge: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:237
+#, python-format
+msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..d2f5ebe
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: English (Australia) (http://www.transifex.com/projects/p/"
+"keystone/language/en_AU/)\n"
+"Language: en_AU\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-error.po
new file mode 100644 (file)
index 0000000..977af69
--- /dev/null
@@ -0,0 +1,179 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: English (Australia) (http://www.transifex.com/projects/p/"
+"keystone/language/en_AU/)\n"
+"Language: en_AU\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/notifications.py:304
+msgid "Failed to construct notifier"
+msgstr ""
+
+#: keystone/notifications.py:389
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr "Failed to send %(res_id)s %(event_type)s notification"
+
+#: keystone/notifications.py:606
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/catalog/core.py:62
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr ""
+
+#: keystone/catalog/core.py:66
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+
+#: keystone/catalog/core.py:71
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+
+#: keystone/catalog/core.py:77
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+
+#: keystone/common/openssl.py:93
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr ""
+
+#: keystone/common/openssl.py:121
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr ""
+
+#: keystone/common/utils.py:239
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+
+#: keystone/common/cache/core.py:100
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+
+#: keystone/common/environment/eventlet_server.py:99
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:185
+msgid "Server error"
+msgstr "Server error"
+
+#: keystone/contrib/endpoint_policy/core.py:129
+#: keystone/contrib/endpoint_policy/core.py:228
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:410
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr ""
+
+#: keystone/contrib/oauth1/core.py:136
+msgid "Cannot retrieve Authorization headers"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:95
+msgid "in fixed duration looping call"
+msgstr "in fixed duration looping call"
+
+#: keystone/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr "in dynamic looping call"
+
+#: keystone/openstack/common/service.py:268
+msgid "Unhandled exception"
+msgstr "Unhandled exception"
+
+#: keystone/resource/core.py:477
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/resource/core.py:939
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/token/provider.py:292
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr "Unexpected error or malformed token determining token expiry: %s"
+
+#: keystone/token/persistence/backends/kvs.py:226
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend.  Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+
+#: keystone/token/providers/common.py:611
+msgid "Failed to validate token"
+msgstr "Failed to validate token"
+
+#: keystone/token/providers/pki.py:47
+msgid "Unable to sign token"
+msgstr "Unable to sign token"
+
+#: keystone/token/providers/fernet/utils.py:38
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:79
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone.po
new file mode 100644 (file)
index 0000000..e3dea47
--- /dev/null
@@ -0,0 +1,1542 @@
+# English (Australia) translations for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Tom Fifield <tom@openstack.org>, 2013
+msgid ""
+msgstr ""
+"Project-Id-Version:  Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-23 06:04+0000\n"
+"PO-Revision-Date: 2015-03-21 23:03+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: English (Australia) "
+"(http://www.transifex.com/projects/p/keystone/language/en_AU/)\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+
+#: keystone/clean.py:24
+#, python-format
+msgid "%s cannot be empty."
+msgstr "%s cannot be empty."
+
+#: keystone/clean.py:26
+#, python-format
+msgid "%(property_name)s cannot be less than %(min_length)s characters."
+msgstr "%(property_name)s cannot be less than %(min_length)s characters."
+
+#: keystone/clean.py:31
+#, python-format
+msgid "%(property_name)s should not be greater than %(max_length)s characters."
+msgstr "%(property_name)s should not be greater than %(max_length)s characters."
+
+#: keystone/clean.py:40
+#, python-format
+msgid "%(property_name)s is not a %(display_expected_type)s"
+msgstr "%(property_name)s is not a %(display_expected_type)s"
+
+#: keystone/cli.py:283
+msgid "At least one option must be provided"
+msgstr ""
+
+#: keystone/cli.py:290
+msgid "--all option cannot be mixed with other options"
+msgstr ""
+
+#: keystone/cli.py:301
+#, python-format
+msgid "Unknown domain '%(name)s' specified by --domain-name"
+msgstr ""
+
+#: keystone/cli.py:365 keystone/tests/unit/test_cli.py:213
+msgid "At least one option must be provided, use either --all or --domain-name"
+msgstr ""
+
+#: keystone/cli.py:371 keystone/tests/unit/test_cli.py:229
+msgid "The --all option cannot be used with the --domain-name option"
+msgstr ""
+
+#: keystone/cli.py:397 keystone/tests/unit/test_cli.py:246
+#, python-format
+msgid ""
+"Invalid domain name: %(domain)s found in config file name: %(file)s - "
+"ignoring this file."
+msgstr ""
+
+#: keystone/cli.py:405 keystone/tests/unit/test_cli.py:187
+#, python-format
+msgid ""
+"Domain: %(domain)s already has a configuration defined - ignoring file: "
+"%(file)s."
+msgstr ""
+
+#: keystone/cli.py:419
+#, python-format
+msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s."
+msgstr ""
+
+#: keystone/cli.py:452
+#, python-format
+msgid ""
+"To get a more detailed information on this error, re-run this command for"
+" the specific domain, i.e.: keystone-manage domain_config_upload "
+"--domain-name %s"
+msgstr ""
+
+#: keystone/cli.py:470
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr "Unable to locate domain config directory: %s"
+
+#: keystone/cli.py:503
+msgid ""
+"Unable to access the keystone database, please check it is configured "
+"correctly."
+msgstr ""
+
+#: keystone/exception.py:79
+#, python-format
+msgid ""
+"Expecting to find %(attribute)s in %(target)s - the server could not "
+"comply with the request since it is either malformed or otherwise "
+"incorrect. The client is assumed to be in error."
+msgstr ""
+
+#: keystone/exception.py:90
+#, python-format
+msgid "%(detail)s"
+msgstr ""
+
+#: keystone/exception.py:94
+msgid ""
+"Timestamp not in expected format. The server could not comply with the "
+"request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"Timestamp not in expected format. The server could not comply with the "
+"request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+
+#: keystone/exception.py:103
+#, python-format
+msgid ""
+"String length exceeded.The length of string '%(string)s' exceeded the "
+"limit of column %(type)s(CHAR(%(length)d))."
+msgstr ""
+"String length exceeded.The length of string '%(string)s' exceeded the "
+"limit of column %(type)s(CHAR(%(length)d))."
+
+#: keystone/exception.py:109
+#, python-format
+msgid ""
+"Request attribute %(attribute)s must be less than or equal to %(size)i. "
+"The server could not comply with the request because the attribute size "
+"is invalid (too large). The client is assumed to be in error."
+msgstr ""
+"Request attribute %(attribute)s must be less than or equal to %(size)i. "
+"The server could not comply with the request because the attribute size "
+"is invalid (too large). The client is assumed to be in error."
+
+#: keystone/exception.py:119
+#, python-format
+msgid ""
+"The specified parent region %(parent_region_id)s would create a circular "
+"region hierarchy."
+msgstr ""
+
+#: keystone/exception.py:126
+#, python-format
+msgid ""
+"The password length must be less than or equal to %(size)i. The server "
+"could not comply with the request because the password is invalid."
+msgstr ""
+
+#: keystone/exception.py:134
+#, python-format
+msgid ""
+"Unable to delete region %(region_id)s because it or its child regions "
+"have associated endpoints."
+msgstr ""
+
+#: keystone/exception.py:141
+msgid ""
+"The certificates you requested are not available. It is likely that this "
+"server does not use PKI tokens otherwise this is the result of "
+"misconfiguration."
+msgstr ""
+
+#: keystone/exception.py:150
+msgid "(Disable debug mode to suppress these details.)"
+msgstr ""
+
+#: keystone/exception.py:155
+#, python-format
+msgid "%(message)s %(amendment)s"
+msgstr ""
+
+#: keystone/exception.py:163
+msgid "The request you have made requires authentication."
+msgstr "The request you have made requires authentication."
+
+#: keystone/exception.py:169
+msgid "Authentication plugin error."
+msgstr "Authentication plugin error."
+
+#: keystone/exception.py:177
+#, python-format
+msgid "Unable to find valid groups while using mapping %(mapping_id)s"
+msgstr ""
+
+#: keystone/exception.py:182
+msgid "Attempted to authenticate with an unsupported method."
+msgstr "Attempted to authenticate with an unsupported method."
+
+#: keystone/exception.py:190
+msgid "Additional authentications steps required."
+msgstr "Additional authentications steps required."
+
+#: keystone/exception.py:198
+msgid "You are not authorized to perform the requested action."
+msgstr "You are not authorized to perform the requested action."
+
+#: keystone/exception.py:205
+#, python-format
+msgid "You are not authorized to perform the requested action: %(action)s"
+msgstr ""
+
+#: keystone/exception.py:210
+#, python-format
+msgid ""
+"Could not change immutable attribute(s) '%(attributes)s' in target "
+"%(target)s"
+msgstr ""
+
+#: keystone/exception.py:215
+#, python-format
+msgid ""
+"Group membership across backend boundaries is not allowed, group in "
+"question is %(group_id)s, user is %(user_id)s"
+msgstr ""
+
+#: keystone/exception.py:221
+#, python-format
+msgid ""
+"Invalid mix of entities for policy association - only Endpoint, Service "
+"or Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, "
+"Service: %(service_id)s, Region: %(region_id)s"
+msgstr ""
+
+#: keystone/exception.py:228
+#, python-format
+msgid "Invalid domain specific configuration: %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:232
+#, python-format
+msgid "Could not find: %(target)s"
+msgstr ""
+
+#: keystone/exception.py:238
+#, python-format
+msgid "Could not find endpoint: %(endpoint_id)s"
+msgstr ""
+
+#: keystone/exception.py:245
+msgid "An unhandled exception has occurred: Could not find metadata."
+msgstr "An unhandled exception has occurred: Could not find metadata."
+
+#: keystone/exception.py:250
+#, python-format
+msgid "Could not find policy: %(policy_id)s"
+msgstr ""
+
+#: keystone/exception.py:254
+msgid "Could not find policy association"
+msgstr ""
+
+#: keystone/exception.py:258
+#, python-format
+msgid "Could not find role: %(role_id)s"
+msgstr ""
+
+#: keystone/exception.py:262
+#, python-format
+msgid ""
+"Could not find role assignment with role: %(role_id)s, user or group: "
+"%(actor_id)s, project or domain: %(target_id)s"
+msgstr ""
+
+#: keystone/exception.py:268
+#, python-format
+msgid "Could not find region: %(region_id)s"
+msgstr ""
+
+#: keystone/exception.py:272
+#, python-format
+msgid "Could not find service: %(service_id)s"
+msgstr ""
+
+#: keystone/exception.py:276
+#, python-format
+msgid "Could not find domain: %(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:280
+#, python-format
+msgid "Could not find project: %(project_id)s"
+msgstr ""
+
+#: keystone/exception.py:284
+#, python-format
+msgid "Cannot create project with parent: %(project_id)s"
+msgstr ""
+
+#: keystone/exception.py:288
+#, python-format
+msgid "Could not find token: %(token_id)s"
+msgstr ""
+
+#: keystone/exception.py:292
+#, python-format
+msgid "Could not find user: %(user_id)s"
+msgstr ""
+
+#: keystone/exception.py:296
+#, python-format
+msgid "Could not find group: %(group_id)s"
+msgstr ""
+
+#: keystone/exception.py:300
+#, python-format
+msgid "Could not find mapping: %(mapping_id)s"
+msgstr ""
+
+#: keystone/exception.py:304
+#, python-format
+msgid "Could not find trust: %(trust_id)s"
+msgstr ""
+
+#: keystone/exception.py:308
+#, python-format
+msgid "No remaining uses for trust: %(trust_id)s"
+msgstr ""
+
+#: keystone/exception.py:312
+#, python-format
+msgid "Could not find credential: %(credential_id)s"
+msgstr ""
+
+#: keystone/exception.py:316
+#, python-format
+msgid "Could not find version: %(version)s"
+msgstr ""
+
+#: keystone/exception.py:320
+#, python-format
+msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
+msgstr ""
+
+#: keystone/exception.py:324
+#, python-format
+msgid "Could not find Identity Provider: %(idp_id)s"
+msgstr ""
+
+#: keystone/exception.py:328
+#, python-format
+msgid "Could not find Service Provider: %(sp_id)s"
+msgstr ""
+
+#: keystone/exception.py:332
+#, python-format
+msgid ""
+"Could not find federated protocol %(protocol_id)s for Identity Provider: "
+"%(idp_id)s"
+msgstr ""
+
+#: keystone/exception.py:343
+#, python-format
+msgid ""
+"Could not find %(group_or_option)s in domain configuration for domain "
+"%(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:348
+#, python-format
+msgid "Conflict occurred attempting to store %(type)s - %(details)s"
+msgstr ""
+
+#: keystone/exception.py:356
+msgid "An unexpected error prevented the server from fulfilling your request."
+msgstr ""
+
+#: keystone/exception.py:359
+#, python-format
+msgid ""
+"An unexpected error prevented the server from fulfilling your request: "
+"%(exception)s"
+msgstr ""
+
+#: keystone/exception.py:382
+#, python-format
+msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
+msgstr ""
+
+#: keystone/exception.py:387
+msgid ""
+"Expected signing certificates are not available on the server. Please "
+"check Keystone configuration."
+msgstr ""
+
+#: keystone/exception.py:393
+#, python-format
+msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
+msgstr "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
+
+#: keystone/exception.py:398
+#, python-format
+msgid ""
+"Group %(group_id)s returned by mapping %(mapping_id)s was not found in "
+"the backend."
+msgstr ""
+
+#: keystone/exception.py:403
+#, python-format
+msgid "Error while reading metadata file, %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:407
+#, python-format
+msgid ""
+"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
+"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:414
+msgid "The action you have requested has not been implemented."
+msgstr "The action you have requested has not been implemented."
+
+#: keystone/exception.py:421
+msgid "The service you have requested is no longer available on this server."
+msgstr ""
+
+#: keystone/exception.py:428
+#, python-format
+msgid "The Keystone configuration file %(config_file)s could not be found."
+msgstr "The Keystone configuration file %(config_file)s could not be found."
+
+#: keystone/exception.py:433
+msgid ""
+"No encryption keys found; run keystone-manage fernet_setup to bootstrap "
+"one."
+msgstr ""
+
+#: keystone/exception.py:438
+#, python-format
+msgid ""
+"The Keystone domain-specific configuration has specified more than one "
+"SQL driver (only one is permitted): %(source)s."
+msgstr ""
+
+#: keystone/exception.py:445
+#, python-format
+msgid ""
+"%(mod_name)s doesn't provide database migrations. The migration "
+"repository path at %(path)s doesn't exist or isn't a directory."
+msgstr ""
+
+#: keystone/exception.py:457
+#, python-format
+msgid ""
+"Unable to sign SAML assertion. It is likely that this server does not "
+"have xmlsec1 installed, or this is the result of misconfiguration. Reason"
+" %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:465
+msgid ""
+"No Authorization headers found, cannot proceed with OAuth related calls, "
+"if running under HTTPd or Apache, ensure WSGIPassAuthorization is set to "
+"On."
+msgstr ""
+
+#: keystone/notifications.py:250
+#, python-format
+msgid "%(event)s is not a valid notification event, must be one of: %(actions)s"
+msgstr ""
+
+#: keystone/notifications.py:259
+#, python-format
+msgid "Method not callable: %s"
+msgstr ""
+
+#: keystone/assignment/controllers.py:107 keystone/identity/controllers.py:69
+#: keystone/resource/controllers.py:78
+msgid "Name field is required and cannot be empty"
+msgstr "Name field is required and cannot be empty"
+
+#: keystone/assignment/controllers.py:330
+#: keystone/assignment/controllers.py:753
+msgid "Specify a domain or project, not both"
+msgstr "Specify a domain or project, not both"
+
+#: keystone/assignment/controllers.py:333
+msgid "Specify one of domain or project"
+msgstr ""
+
+#: keystone/assignment/controllers.py:338
+#: keystone/assignment/controllers.py:758
+msgid "Specify a user or group, not both"
+msgstr "Specify a user or group, not both"
+
+#: keystone/assignment/controllers.py:341
+msgid "Specify one of user or group"
+msgstr ""
+
+#: keystone/assignment/controllers.py:742
+msgid "Combining effective and group filter will always result in an empty list."
+msgstr ""
+
+#: keystone/assignment/controllers.py:747
+msgid ""
+"Combining effective, domain and inherited filters will always result in "
+"an empty list."
+msgstr ""
+
+#: keystone/assignment/core.py:228
+msgid "Must specify either domain or project"
+msgstr ""
+
+#: keystone/assignment/core.py:493
+#, python-format
+msgid "Project (%s)"
+msgstr "Project (%s)"
+
+#: keystone/assignment/core.py:495
+#, python-format
+msgid "Domain (%s)"
+msgstr "Domain (%s)"
+
+#: keystone/assignment/core.py:497
+msgid "Unknown Target"
+msgstr "Unknown Target"
+
+#: keystone/assignment/backends/ldap.py:92
+msgid "Domain metadata not supported by LDAP"
+msgstr ""
+
+#: keystone/assignment/backends/ldap.py:381
+#, python-format
+msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
+msgstr ""
+
+#: keystone/assignment/backends/ldap.py:387
+#, python-format
+msgid "Role %s not found"
+msgstr "Role %s not found"
+
+#: keystone/assignment/backends/ldap.py:402
+#: keystone/assignment/backends/sql.py:335
+#, python-format
+msgid "Cannot remove role that has not been granted, %s"
+msgstr "Cannot remove role that has not been granted, %s"
+
+#: keystone/assignment/backends/sql.py:356
+#, python-format
+msgid "Unexpected assignment type encountered, %s"
+msgstr ""
+
+#: keystone/assignment/role_backends/ldap.py:61 keystone/catalog/core.py:103
+#: keystone/common/ldap/core.py:1400 keystone/resource/backends/ldap.py:149
+#, python-format
+msgid "Duplicate ID, %s."
+msgstr "Duplicate ID, %s."
+
+#: keystone/assignment/role_backends/ldap.py:69
+#: keystone/common/ldap/core.py:1390
+#, python-format
+msgid "Duplicate name, %s."
+msgstr "Duplicate name, %s."
+
+#: keystone/assignment/role_backends/ldap.py:119
+#, python-format
+msgid "Cannot duplicate name %s"
+msgstr ""
+
+#: keystone/auth/controllers.py:60
+#, python-format
+msgid ""
+"Cannot load an auth-plugin by class-name without a \"method\" attribute "
+"defined: %s"
+msgstr ""
+
+#: keystone/auth/controllers.py:71
+#, python-format
+msgid ""
+"Auth plugin %(plugin)s is requesting previously registered method "
+"%(method)s"
+msgstr ""
+
+#: keystone/auth/controllers.py:115
+#, python-format
+msgid ""
+"Unable to reconcile identity attribute %(attribute)s as it has "
+"conflicting values %(new)s and %(old)s"
+msgstr ""
+
+#: keystone/auth/controllers.py:336
+msgid "Scoping to both domain and project is not allowed"
+msgstr "Scoping to both domain and project is not allowed"
+
+#: keystone/auth/controllers.py:339
+msgid "Scoping to both domain and trust is not allowed"
+msgstr "Scoping to both domain and trust is not allowed"
+
+#: keystone/auth/controllers.py:342
+msgid "Scoping to both project and trust is not allowed"
+msgstr "Scoping to both project and trust is not allowed"
+
+#: keystone/auth/controllers.py:512
+msgid "User not found"
+msgstr "User not found"
+
+#: keystone/auth/controllers.py:616
+msgid "A project-scoped token is required to produce a service catalog."
+msgstr ""
+
+#: keystone/auth/plugins/external.py:46
+msgid "No authenticated user"
+msgstr "No authenticated user"
+
+#: keystone/auth/plugins/external.py:56
+#, python-format
+msgid "Unable to lookup user %s"
+msgstr "Unable to lookup user %s"
+
+#: keystone/auth/plugins/external.py:107
+msgid "auth_type is not Negotiate"
+msgstr ""
+
+#: keystone/auth/plugins/mapped.py:244
+msgid "Could not map user"
+msgstr ""
+
+#: keystone/auth/plugins/oauth1.py:39
+#, python-format
+msgid "%s not supported"
+msgstr ""
+
+#: keystone/auth/plugins/oauth1.py:57
+msgid "Access token is expired"
+msgstr "Access token is expired"
+
+#: keystone/auth/plugins/oauth1.py:71
+msgid "Could not validate the access token"
+msgstr ""
+
+#: keystone/auth/plugins/password.py:46
+msgid "Invalid username or password"
+msgstr "Invalid username or password"
+
+#: keystone/auth/plugins/token.py:72 keystone/token/controllers.py:160
+msgid "rescope a scoped token"
+msgstr ""
+
+#: keystone/catalog/controllers.py:168
+#, python-format
+msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
+msgstr ""
+
+#: keystone/common/authorization.py:47 keystone/common/wsgi.py:64
+#, python-format
+msgid "token reference must be a KeystoneToken type, got: %s"
+msgstr ""
+
+#: keystone/common/base64utils.py:66
+msgid "pad must be single character"
+msgstr "pad must be single character"
+
+#: keystone/common/base64utils.py:215
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
+msgstr "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
+
+#: keystone/common/base64utils.py:219
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgstr "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+
+#: keystone/common/base64utils.py:225
+#, python-format
+msgid "text is not a multiple of 4, but contains pad \"%s\""
+msgstr "text is not a multiple of 4, but contains pad \"%s\""
+
+#: keystone/common/base64utils.py:244 keystone/common/base64utils.py:265
+msgid "padded base64url text must be multiple of 4 characters"
+msgstr "padded base64url text must be multiple of 4 characters"
+
+#: keystone/common/controller.py:237 keystone/token/providers/common.py:589
+msgid "Non-default domain is not supported"
+msgstr "Non-default domain is not supported"
+
+#: keystone/common/controller.py:305 keystone/identity/core.py:428
+#: keystone/resource/core.py:761 keystone/resource/backends/ldap.py:61
+#, python-format
+msgid "Expected dict or list: %s"
+msgstr "Expected dict or list: %s"
+
+#: keystone/common/controller.py:318
+msgid "Marker could not be found"
+msgstr "Marker could not be found"
+
+#: keystone/common/controller.py:329
+msgid "Invalid limit value"
+msgstr "Invalid limit value"
+
+#: keystone/common/controller.py:637
+msgid "Cannot change Domain ID"
+msgstr ""
+
+#: keystone/common/controller.py:666
+msgid "domain_id is required as part of entity"
+msgstr ""
+
+#: keystone/common/controller.py:701
+msgid "A domain-scoped token must be used"
+msgstr ""
+
+#: keystone/common/dependency.py:68
+#, python-format
+msgid "Unregistered dependency: %(name)s for %(targets)s"
+msgstr ""
+
+#: keystone/common/dependency.py:108
+msgid "event_callbacks must be a dict"
+msgstr ""
+
+#: keystone/common/dependency.py:113
+#, python-format
+msgid "event_callbacks[%s] must be a dict"
+msgstr ""
+
+#: keystone/common/pemutils.py:223
+#, python-format
+msgid "unknown pem_type \"%(pem_type)s\", valid types are: %(valid_pem_types)s"
+msgstr "unknown pem_type \"%(pem_type)s\", valid types are: %(valid_pem_types)s"
+
+#: keystone/common/pemutils.py:242
+#, python-format
+msgid ""
+"unknown pem header \"%(pem_header)s\", valid headers are: "
+"%(valid_pem_headers)s"
+msgstr ""
+"unknown pem header \"%(pem_header)s\", valid headers are: "
+"%(valid_pem_headers)s"
+
+#: keystone/common/pemutils.py:298
+#, python-format
+msgid "failed to find end matching \"%s\""
+msgstr "failed to find end matching \"%s\""
+
+#: keystone/common/pemutils.py:302
+#, python-format
+msgid ""
+"beginning & end PEM headers do not match (%(begin_pem_header)s!= "
+"%(end_pem_header)s)"
+msgstr ""
+"beginning & end PEM headers do not match (%(begin_pem_header)s!= "
+"%(end_pem_header)s)"
+
+#: keystone/common/pemutils.py:377
+#, python-format
+msgid "unknown pem_type: \"%s\""
+msgstr "unknown pem_type: \"%s\""
+
+#: keystone/common/pemutils.py:389
+#, python-format
+msgid ""
+"failed to base64 decode %(pem_type)s PEM at position%(position)d: "
+"%(err_msg)s"
+msgstr ""
+"failed to base64 decode %(pem_type)s PEM at position%(position)d: "
+"%(err_msg)s"
+
+#: keystone/common/utils.py:164 keystone/credential/controllers.py:44
+msgid "Invalid blob in credential"
+msgstr "Invalid blob in credential"
+
+#: keystone/common/wsgi.py:330
+#, python-format
+msgid "%s field is required and cannot be empty"
+msgstr ""
+
+#: keystone/common/wsgi.py:342
+#, python-format
+msgid "%s field(s) cannot be empty"
+msgstr ""
+
+#: keystone/common/wsgi.py:563
+msgid "The resource could not be found."
+msgstr "The resource could not be found."
+
+#: keystone/common/wsgi.py:704
+#, python-format
+msgid "Unexpected status requested for JSON Home response, %s"
+msgstr ""
+
+#: keystone/common/cache/_memcache_pool.py:113
+#, python-format
+msgid "Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
+msgstr ""
+
+#: keystone/common/cache/core.py:132
+msgid "region not type dogpile.cache.CacheRegion"
+msgstr "region not type dogpile.cache.CacheRegion"
+
+#: keystone/common/cache/backends/mongo.py:231
+msgid "db_hosts value is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:236
+msgid "database db_name is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:241
+msgid "cache_collection name is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:252
+msgid "integer value expected for w (write concern attribute)"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:260
+msgid "replicaset_name required when use_replica is True"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:275
+msgid "integer value expected for mongo_ttl_seconds"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:301
+msgid "no ssl support available"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:310
+#, python-format
+msgid ""
+"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\","
+" \"REQUIRED\""
+msgstr ""
+
+#: keystone/common/kvs/core.py:71
+#, python-format
+msgid "Lock Timeout occurred for key, %(target)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:106
+#, python-format
+msgid "KVS region %s is already configured. Cannot reconfigure."
+msgstr ""
+
+#: keystone/common/kvs/core.py:145
+#, python-format
+msgid "Key Value Store not configured: %s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:198
+msgid "`key_mangler` option must be a function reference"
+msgstr ""
+
+#: keystone/common/kvs/core.py:353
+#, python-format
+msgid "Lock key must match target key: %(lock)s != %(target)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:357
+msgid "Must be called within an active lock context."
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:69
+#, python-format
+msgid "Maximum lock attempts on %s occurred."
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:108
+#, python-format
+msgid ""
+"Backend `%(driver)s` is not a valid memcached backend. Valid drivers: "
+"%(driver_list)s"
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:178
+msgid "`key_mangler` functions must be callable."
+msgstr ""
+
+#: keystone/common/ldap/core.py:191
+#, python-format
+msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:201
+#, python-format
+msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
+msgstr "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
+
+#: keystone/common/ldap/core.py:213
+#, python-format
+msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
+msgstr "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
+
+#: keystone/common/ldap/core.py:588
+msgid "Invalid TLS / LDAPS combination"
+msgstr "Invalid TLS / LDAPS combination"
+
+#: keystone/common/ldap/core.py:593
+#, python-format
+msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
+msgstr "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
+
+#: keystone/common/ldap/core.py:603
+#, python-format
+msgid "tls_cacertfile %s not found or is not a file"
+msgstr "tls_cacertfile %s not found or is not a file"
+
+#: keystone/common/ldap/core.py:615
+#, python-format
+msgid "tls_cacertdir %s not found or is not a directory"
+msgstr "tls_cacertdir %s not found or is not a directory"
+
+#: keystone/common/ldap/core.py:1325
+#, python-format
+msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1369
+#, python-format
+msgid "LDAP %s create"
+msgstr "LDAP %s create"
+
+#: keystone/common/ldap/core.py:1374
+#, python-format
+msgid "LDAP %s update"
+msgstr "LDAP %s update"
+
+#: keystone/common/ldap/core.py:1379
+#, python-format
+msgid "LDAP %s delete"
+msgstr "LDAP %s delete"
+
+#: keystone/common/ldap/core.py:1521
+msgid ""
+"Disabling an entity where the 'enable' attribute is ignored by "
+"configuration."
+msgstr ""
+
+#: keystone/common/ldap/core.py:1532
+#, python-format
+msgid "Cannot change %(option_name)s %(attr)s"
+msgstr "Cannot change %(option_name)s %(attr)s"
+
+#: keystone/common/ldap/core.py:1619
+#, python-format
+msgid "Member %(member)s is already a member of group %(group)s"
+msgstr ""
+
+#: keystone/common/sql/core.py:219
+msgid ""
+"Cannot truncate a driver call without hints list as first parameter after"
+" self "
+msgstr ""
+
+#: keystone/common/sql/core.py:410
+msgid "Duplicate Entry"
+msgstr ""
+
+#: keystone/common/sql/core.py:426
+#, python-format
+msgid "An unexpected error occurred when trying to store %s"
+msgstr ""
+
+#: keystone/common/sql/migration_helpers.py:187
+#: keystone/common/sql/migration_helpers.py:245
+#, python-format
+msgid "%s extension does not exist."
+msgstr ""
+
+#: keystone/common/validation/validators.py:54
+#, python-format
+msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:318
+msgid "Token belongs to another user"
+msgstr "Token belongs to another user"
+
+#: keystone/contrib/ec2/controllers.py:346
+msgid "Credential belongs to another user"
+msgstr "Credential belongs to another user"
+
+#: keystone/contrib/endpoint_filter/backends/sql.py:69
+#, python-format
+msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+msgstr "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+
+#: keystone/contrib/endpoint_filter/backends/sql.py:180
+msgid "Endpoint Group Project Association not found"
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:258
+#, python-format
+msgid "No policy is associated with endpoint %(endpoint_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:274
+msgid "Missing entity ID from environment"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:282
+msgid "Request must have an origin query parameter"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:292
+#, python-format
+msgid "%(host)s is not a trusted dashboard host"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:333
+msgid "Use a project scoped token when attempting to create a SAML assertion"
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:454
+#, python-format
+msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:521
+msgid "Ensure configuration option idp_entity_id is set."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:524
+msgid "Ensure configuration option idp_sso_endpoint is set."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:544
+msgid ""
+"idp_contact_type must be one of: [technical, other, support, "
+"administrative or billing."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:178
+msgid "Federation token is expired"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:208
+msgid ""
+"Could not find Identity Provider identifier in environment, check "
+"[federation] remote_id_attribute for details."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:213
+msgid ""
+"Incoming identity provider identifier not included among the accepted "
+"identifiers."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:501
+#, python-format
+msgid "User type %s not supported"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:537
+#, python-format
+msgid ""
+"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords "
+"must be specified."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:753
+#, python-format
+msgid "Identity Provider %(idp)s is disabled"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:761
+#, python-format
+msgid "Service Provider %(sp)s is disabled"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:99
+msgid "Cannot change consumer secret"
+msgstr "Cannot change consumer secret"
+
+#: keystone/contrib/oauth1/controllers.py:131
+msgid "Cannot list request tokens with a token issued via delegation."
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:192
+#: keystone/contrib/oauth1/backends/sql.py:270
+msgid "User IDs do not match"
+msgstr "User IDs do not match"
+
+#: keystone/contrib/oauth1/controllers.py:199
+msgid "Could not find role"
+msgstr "Could not find role"
+
+#: keystone/contrib/oauth1/controllers.py:248
+msgid "Invalid signature"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:299
+#: keystone/contrib/oauth1/controllers.py:377
+msgid "Request token is expired"
+msgstr "Request token is expired"
+
+#: keystone/contrib/oauth1/controllers.py:313
+msgid "There should not be any non-oauth parameters"
+msgstr "There should not be any non-oauth parameters"
+
+#: keystone/contrib/oauth1/controllers.py:317
+msgid "provided consumer key does not match stored consumer key"
+msgstr "provided consumer key does not match stored consumer key"
+
+#: keystone/contrib/oauth1/controllers.py:321
+msgid "provided verifier does not match stored verifier"
+msgstr "provided verifier does not match stored verifier"
+
+#: keystone/contrib/oauth1/controllers.py:325
+msgid "provided request key does not match stored request key"
+msgstr "provided request key does not match stored request key"
+
+#: keystone/contrib/oauth1/controllers.py:329
+msgid "Request Token does not have an authorizing user id"
+msgstr "Request Token does not have an authorizing user id"
+
+#: keystone/contrib/oauth1/controllers.py:366
+msgid "Cannot authorize a request token with a token issued via delegation."
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:396
+msgid "authorizing user does not have role required"
+msgstr "authorizing user does not have role required"
+
+#: keystone/contrib/oauth1/controllers.py:409
+msgid "User is not a member of the requested project"
+msgstr "User is not a member of the requested project"
+
+#: keystone/contrib/oauth1/backends/sql.py:91
+msgid "Consumer not found"
+msgstr "Consumer not found"
+
+#: keystone/contrib/oauth1/backends/sql.py:186
+msgid "Request token not found"
+msgstr "Request token not found"
+
+#: keystone/contrib/oauth1/backends/sql.py:250
+msgid "Access token not found"
+msgstr "Access token not found"
+
+#: keystone/contrib/revoke/controllers.py:33
+#, python-format
+msgid "invalid date format %s"
+msgstr ""
+
+#: keystone/contrib/revoke/core.py:150
+msgid ""
+"The revoke call must not have both domain_id and project_id. This is a "
+"bug in the Keystone server. The current request is aborted."
+msgstr ""
+
+#: keystone/contrib/revoke/core.py:218 keystone/token/provider.py:207
+#: keystone/token/provider.py:230 keystone/token/provider.py:296
+#: keystone/token/provider.py:303
+msgid "Failed to validate token"
+msgstr "Failed to validate token"
+
+#: keystone/identity/controllers.py:72
+msgid "Enabled field must be a boolean"
+msgstr "Enabled field must be a boolean"
+
+#: keystone/identity/controllers.py:98
+msgid "Enabled field should be a boolean"
+msgstr "Enabled field should be a boolean"
+
+#: keystone/identity/core.py:112
+#, python-format
+msgid "Database at /domains/%s/config"
+msgstr ""
+
+#: keystone/identity/core.py:287 keystone/identity/backends/ldap.py:59
+#: keystone/identity/backends/ldap.py:61 keystone/identity/backends/ldap.py:67
+#: keystone/identity/backends/ldap.py:69 keystone/identity/backends/sql.py:104
+#: keystone/identity/backends/sql.py:106
+msgid "Invalid user / password"
+msgstr ""
+
+#: keystone/identity/core.py:693
+#, python-format
+msgid "User is disabled: %s"
+msgstr "User is disabled: %s"
+
+#: keystone/identity/core.py:735
+msgid "Cannot change user ID"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:99
+msgid "Cannot change user name"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:188 keystone/identity/backends/sql.py:188
+#: keystone/identity/backends/sql.py:206
+#, python-format
+msgid "User '%(user_id)s' not found in group '%(group_id)s'"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:339
+#, python-format
+msgid "User %(user_id)s is already a member of group %(group_id)s"
+msgstr "User %(user_id)s is already a member of group %(group_id)s"
+
+#: keystone/models/token_model.py:61
+msgid "Found invalid token: scoped to both project and domain."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:108
+#, python-format
+msgid ""
+"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
+"may be removed in %(remove_in)s."
+msgstr ""
+"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
+"may be removed in %(remove_in)s."
+
+#: keystone/openstack/common/versionutils.py:112
+#, python-format
+msgid ""
+"%(what)s is deprecated as of %(as_of)s and may be removed in "
+"%(remove_in)s. It will not be superseded."
+msgstr ""
+"%(what)s is deprecated as of %(as_of)s and may be removed in "
+"%(remove_in)s. It will not be superseded."
+
+#: keystone/openstack/common/versionutils.py:116
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:119
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:241
+#, python-format
+msgid "Deprecated: %s"
+msgstr "Deprecated: %s"
+
+#: keystone/openstack/common/versionutils.py:259
+#, python-format
+msgid "Fatal call to deprecated config: %(msg)s"
+msgstr "Fatal call to deprecated config: %(msg)s"
+
+#: keystone/resource/controllers.py:231
+msgid ""
+"Cannot use parents_as_list and parents_as_ids query params at the same "
+"time."
+msgstr ""
+
+#: keystone/resource/controllers.py:237
+msgid ""
+"Cannot use subtree_as_list and subtree_as_ids query params at the same "
+"time."
+msgstr ""
+
+#: keystone/resource/core.py:80
+#, python-format
+msgid "max hierarchy depth reached for %s branch."
+msgstr ""
+
+#: keystone/resource/core.py:97
+msgid "cannot create a project within a different domain than its parents."
+msgstr ""
+
+#: keystone/resource/core.py:101
+#, python-format
+msgid "cannot create a project in a branch containing a disabled project: %s"
+msgstr ""
+
+#: keystone/resource/core.py:123
+#, python-format
+msgid "Domain is disabled: %s"
+msgstr "Domain is disabled: %s"
+
+#: keystone/resource/core.py:141
+#, python-format
+msgid "Domain cannot be named %s"
+msgstr ""
+
+#: keystone/resource/core.py:144
+#, python-format
+msgid "Domain cannot have ID %s"
+msgstr ""
+
+#: keystone/resource/core.py:156
+#, python-format
+msgid "Project is disabled: %s"
+msgstr "Project is disabled: %s"
+
+#: keystone/resource/core.py:176
+#, python-format
+msgid "cannot enable project %s since it has disabled parents"
+msgstr ""
+
+#: keystone/resource/core.py:184
+#, python-format
+msgid "cannot disable project %s since its subtree contains enabled projects"
+msgstr ""
+
+#: keystone/resource/core.py:195
+msgid "Update of `parent_id` is not allowed."
+msgstr ""
+
+#: keystone/resource/core.py:222
+#, python-format
+msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
+msgstr ""
+
+#: keystone/resource/core.py:376
+msgid "Multiple domains are not supported"
+msgstr ""
+
+#: keystone/resource/core.py:429
+msgid "delete the default domain"
+msgstr ""
+
+#: keystone/resource/core.py:440
+msgid "cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+
+#: keystone/resource/core.py:841
+msgid "No options specified"
+msgstr "No options specified"
+
+#: keystone/resource/core.py:847
+#, python-format
+msgid ""
+"The value of group %(group)s specified in the config should be a "
+"dictionary of options"
+msgstr ""
+
+#: keystone/resource/core.py:871
+#, python-format
+msgid ""
+"Option %(option)s found with no group specified while checking domain "
+"configuration request"
+msgstr ""
+
+#: keystone/resource/core.py:878
+#, python-format
+msgid "Group %(group)s is not supported for domain specific configurations"
+msgstr ""
+
+#: keystone/resource/core.py:885
+#, python-format
+msgid ""
+"Option %(option)s in group %(group)s is not supported for domain specific"
+" configurations"
+msgstr ""
+
+#: keystone/resource/core.py:938
+msgid "An unexpected error occurred when retrieving domain configs"
+msgstr ""
+
+#: keystone/resource/core.py:1013 keystone/resource/core.py:1097
+#: keystone/resource/core.py:1167 keystone/resource/config_backends/sql.py:70
+#, python-format
+msgid "option %(option)s in group %(group)s"
+msgstr ""
+
+#: keystone/resource/core.py:1016 keystone/resource/core.py:1102
+#: keystone/resource/core.py:1163
+#, python-format
+msgid "group %(group)s"
+msgstr ""
+
+#: keystone/resource/core.py:1018
+msgid "any options"
+msgstr ""
+
+#: keystone/resource/core.py:1062
+#, python-format
+msgid ""
+"Trying to update option %(option)s in group %(group)s, so that, and only "
+"that, option must be specified  in the config"
+msgstr ""
+
+#: keystone/resource/core.py:1067
+#, python-format
+msgid ""
+"Trying to update group %(group)s, so that, and only that, group must be "
+"specified in the config"
+msgstr ""
+
+#: keystone/resource/core.py:1076
+#, python-format
+msgid ""
+"request to update group %(group)s, but config provided contains group "
+"%(group_other)s instead"
+msgstr ""
+
+#: keystone/resource/core.py:1083
+#, python-format
+msgid ""
+"Trying to update option %(option)s in group %(group)s, but config "
+"provided contains option %(option_other)s instead"
+msgstr ""
+
+#: keystone/resource/backends/ldap.py:151
+#: keystone/resource/backends/ldap.py:159
+#: keystone/resource/backends/ldap.py:163
+msgid "Domains are read-only against LDAP"
+msgstr ""
+
+#: keystone/server/eventlet.py:77
+msgid ""
+"Running keystone via eventlet is deprecated as of Kilo in favor of "
+"running in a WSGI server (e.g. mod_wsgi). Support for keystone under "
+"eventlet will be removed in the \"M\"-Release."
+msgstr ""
+
+#: keystone/server/eventlet.py:90
+#, python-format
+msgid "Failed to start the %(name)s server"
+msgstr ""
+
+#: keystone/token/controllers.py:391
+#, python-format
+msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
+msgstr "User %(u_id)s is unauthorized for tenant %(t_id)s"
+
+#: keystone/token/controllers.py:410 keystone/token/controllers.py:413
+msgid "Token does not belong to specified tenant."
+msgstr "Token does not belong to specified tenant."
+
+#: keystone/token/persistence/backends/kvs.py:133
+#, python-format
+msgid "Unknown token version %s"
+msgstr ""
+
+#: keystone/token/providers/common.py:250
+#: keystone/token/providers/common.py:355
+#, python-format
+msgid "User %(user_id)s has no access to project %(project_id)s"
+msgstr "User %(user_id)s has no access to project %(project_id)s"
+
+#: keystone/token/providers/common.py:255
+#: keystone/token/providers/common.py:360
+#, python-format
+msgid "User %(user_id)s has no access to domain %(domain_id)s"
+msgstr "User %(user_id)s has no access to domain %(domain_id)s"
+
+#: keystone/token/providers/common.py:282
+msgid "Trustor is disabled."
+msgstr "Trustor is disabled."
+
+#: keystone/token/providers/common.py:346
+msgid "Trustee has no delegated roles."
+msgstr "Trustee has no delegated roles."
+
+#: keystone/token/providers/common.py:407
+#, python-format
+msgid "Invalid audit info data type: %(data)s (%(type)s)"
+msgstr ""
+
+#: keystone/token/providers/common.py:435
+msgid "User is not a trustee."
+msgstr "User is not a trustee."
+
+#: keystone/token/providers/common.py:579
+msgid ""
+"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
+"Authentication"
+msgstr ""
+
+#: keystone/token/providers/common.py:597
+msgid "Domain scoped token is not supported"
+msgstr "Domain scoped token is not supported"
+
+#: keystone/token/providers/pki.py:48 keystone/token/providers/pkiz.py:30
+msgid "Unable to sign token."
+msgstr "Unable to sign token."
+
+#: keystone/token/providers/fernet/core.py:215
+msgid ""
+"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
+"tokens."
+msgstr ""
+
+#: keystone/token/providers/fernet/token_formatters.py:189
+#, python-format
+msgid "This is not a recognized Fernet payload version: %s"
+msgstr ""
+
+#: keystone/trust/controllers.py:148
+msgid "Redelegation allowed for delegated by trust only"
+msgstr ""
+
+#: keystone/trust/controllers.py:181
+msgid "The authenticated user should match the trustor."
+msgstr ""
+
+#: keystone/trust/controllers.py:186
+msgid "At least one role should be specified."
+msgstr ""
+
+#: keystone/trust/core.py:57
+#, python-format
+msgid ""
+"Remaining redelegation depth of %(redelegation_depth)d out of allowed "
+"range of [0..%(max_count)d]"
+msgstr ""
+
+#: keystone/trust/core.py:66
+#, python-format
+msgid ""
+"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
+"order to redelegate a trust"
+msgstr ""
+
+#: keystone/trust/core.py:77
+msgid "Requested expiration time is more than redelegated trust can provide"
+msgstr ""
+
+#: keystone/trust/core.py:87
+msgid "Some of requested roles are not in redelegated trust"
+msgstr ""
+
+#: keystone/trust/core.py:116
+msgid "One of the trust agents is disabled or deleted"
+msgstr ""
+
+#: keystone/trust/core.py:135
+msgid "remaining_uses must be a positive integer or null."
+msgstr ""
+
+#: keystone/trust/core.py:141
+#, python-format
+msgid ""
+"Requested redelegation depth of %(requested_count)d is greater than "
+"allowed %(max_count)d"
+msgstr ""
+
+#: keystone/trust/core.py:147
+msgid "remaining_uses must not be set if redelegation is allowed"
+msgstr ""
+
+#: keystone/trust/core.py:157
+msgid ""
+"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting"
+" this parameter is advised."
+msgstr ""
+
diff --git a/keystone-moon/keystone/locale/en_GB/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/en_GB/LC_MESSAGES/keystone-log-info.po
new file mode 100644 (file)
index 0000000..a0da5ee
--- /dev/null
@@ -0,0 +1,214 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Andi Chandler <andi@gowling.com>, 2014
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/"
+"keystone/language/en_GB/)\n"
+"Language: en_GB\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/assignment/core.py:250
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr ""
+
+#: keystone/assignment/core.py:258
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr ""
+
+#: keystone/auth/controllers.py:64
+msgid "Loading auth-plugins by class-name is deprecated."
+msgstr ""
+
+#: keystone/auth/controllers.py:106
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s.  Will use "
+"the earliest value."
+msgstr ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s.  Will use "
+"the earliest value."
+
+#: keystone/common/openssl.py:81
+#, python-format
+msgid "Running command - %s"
+msgstr ""
+
+#: keystone/common/wsgi.py:79
+msgid "No bind information present in token"
+msgstr ""
+
+#: keystone/common/wsgi.py:83
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr ""
+
+#: keystone/common/wsgi.py:90
+msgid "Kerberos credentials required and not present"
+msgstr ""
+
+#: keystone/common/wsgi.py:94
+msgid "Kerberos credentials do not match those in bind"
+msgstr ""
+
+#: keystone/common/wsgi.py:98
+msgid "Kerberos bind authentication successful"
+msgstr ""
+
+#: keystone/common/wsgi.py:105
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:103
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:138
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr ""
+
+#: keystone/common/kvs/core.py:188
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:200
+#, python-format
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:210
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr ""
+
+#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
+#, python-format
+msgid ""
+"Received the following notification: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+msgstr ""
+
+#: keystone/openstack/common/eventlet_backdoor.py:146
+#, python-format
+msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
+msgstr "Eventlet backdoor listening on %(port)s for process %(pid)d"
+
+#: keystone/openstack/common/service.py:173
+#, python-format
+msgid "Caught %s, exiting"
+msgstr "Caught %s, exiting"
+
+#: keystone/openstack/common/service.py:231
+msgid "Parent process has died unexpectedly, exiting"
+msgstr "Parent process has died unexpectedly, exiting"
+
+#: keystone/openstack/common/service.py:262
+#, python-format
+msgid "Child caught %s, exiting"
+msgstr "Child caught %s, exiting"
+
+#: keystone/openstack/common/service.py:301
+msgid "Forking too fast, sleeping"
+msgstr "Forking too fast, sleeping"
+
+#: keystone/openstack/common/service.py:320
+#, python-format
+msgid "Started child %d"
+msgstr "Started child %d"
+
+#: keystone/openstack/common/service.py:330
+#, python-format
+msgid "Starting %d workers"
+msgstr "Starting %d workers"
+
+#: keystone/openstack/common/service.py:347
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr "Child %(pid)d killed by signal %(sig)d"
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "Child %(pid)s exited with status %(code)d"
+msgstr "Child %(pid)s exited with status %(code)d"
+
+#: keystone/openstack/common/service.py:390
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr "Caught %s, stopping children"
+
+#: keystone/openstack/common/service.py:399
+msgid "Wait called after thread killed. Cleaning up."
+msgstr ""
+
+#: keystone/openstack/common/service.py:415
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr "Waiting on %d children to exit"
+
+#: keystone/token/persistence/backends/sql.py:279
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr "Total expired tokens removed: %d"
+
+#: keystone/token/providers/fernet/utils.py:72
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:130
+#, python-format
+msgid "Created a new key: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:143
+msgid "Key repository is already initialized; aborting."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:179
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:185
+#, python-format
+msgid "Current primary key is: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:187
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:197
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:213
+#, python-format
+msgid "Excess keys to purge: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:237
+#, python-format
+msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..6ebff22
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Spanish (http://www.transifex.com/projects/p/keystone/"
+"language/es/)\n"
+"Language: es\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "No se puede abrir el archivo de plantilla %s"
diff --git a/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-error.po
new file mode 100644 (file)
index 0000000..d1c2eaa
--- /dev/null
@@ -0,0 +1,177 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Spanish (http://www.transifex.com/projects/p/keystone/"
+"language/es/)\n"
+"Language: es\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/notifications.py:304
+msgid "Failed to construct notifier"
+msgstr ""
+
+#: keystone/notifications.py:389
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/notifications.py:606
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/catalog/core.py:62
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr ""
+
+#: keystone/catalog/core.py:66
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr ""
+"Punto final formado incorrectamente %(url)s - clave desconocida %(keyerror)s"
+
+#: keystone/catalog/core.py:71
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+
+#: keystone/catalog/core.py:77
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr ""
+
+#: keystone/common/openssl.py:93
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr ""
+
+#: keystone/common/openssl.py:121
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr ""
+
+#: keystone/common/utils.py:239
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+"Error configurando el entorno de depuración. Verifique que la opción --debug-"
+"url tiene el formato <host>:<port> y que un proceso de depuración está "
+"publicado en ese host y puerto"
+
+#: keystone/common/cache/core.py:100
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:99
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "No se puede asociar a %(host)s:%(port)s"
+
+#: keystone/common/environment/eventlet_server.py:185
+msgid "Server error"
+msgstr "Error del servidor"
+
+#: keystone/contrib/endpoint_policy/core.py:129
+#: keystone/contrib/endpoint_policy/core.py:228
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:410
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr ""
+
+#: keystone/contrib/oauth1/core.py:136
+msgid "Cannot retrieve Authorization headers"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:95
+msgid "in fixed duration looping call"
+msgstr "en llamada en bucle de duración fija"
+
+#: keystone/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr "en llamada en bucle dinámica"
+
+#: keystone/openstack/common/service.py:268
+msgid "Unhandled exception"
+msgstr "Excepción no controlada"
+
+#: keystone/resource/core.py:477
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/resource/core.py:939
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/token/provider.py:292
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:226
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend.  Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+
+#: keystone/token/providers/common.py:611
+msgid "Failed to validate token"
+msgstr "Ha fallado la validación del token"
+
+#: keystone/token/providers/pki.py:47
+msgid "Unable to sign token"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:38
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:79
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..c40440b
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: French (http://www.transifex.com/projects/p/keystone/language/"
+"fr/)\n"
+"Language: fr\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "Impossible d'ouvrir le fichier modèle %s"
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-error.po
new file mode 100644 (file)
index 0000000..d8dc409
--- /dev/null
@@ -0,0 +1,184 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Bruno Cornec <bruno.cornec@hp.com>, 2014
+# Maxime COQUEREL <max.coquerel@gmail.com>, 2014
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: French (http://www.transifex.com/projects/p/keystone/language/"
+"fr/)\n"
+"Language: fr\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: keystone/notifications.py:304
+msgid "Failed to construct notifier"
+msgstr "Échec de construction de la notification"
+
+#: keystone/notifications.py:389
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr "Échec de l'envoi de la notification %(res_id)s %(event_type)s"
+
+#: keystone/notifications.py:606
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr "Échec de l'envoi de la notification %(action)s %(event_type)s "
+
+#: keystone/catalog/core.py:62
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr "Critère mal formé - %(url)r n'est pas une chaine de caractère"
+
+#: keystone/catalog/core.py:66
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "Noeud final incorrect %(url)s - clé inconnue %(keyerror)s"
+
+#: keystone/catalog/core.py:71
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+"Noeud final incorrect '%(url)s'. L'erreur suivante est survenue pendant la "
+"substitution de chaine : %(typeerror)s"
+
+#: keystone/catalog/core.py:77
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr ""
+"Noeud final incorrect '%s - Format incomplet (un type de notification manque-"
+"t-il ?)"
+
+#: keystone/common/openssl.py:93
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr "La commande %(to_exec)s a retourné %(retcode)s- %(output)s"
+
+#: keystone/common/openssl.py:121
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr "Échec de la suppression du fichier %(file_path)r: %(error)s"
+
+#: keystone/common/utils.py:239
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+"Erreur de configuration de l'environnement de débogage. Vérifiez que "
+"l'option --debug-url a le format <host>:<port> et que le processus de "
+"débogage écoute sur ce port."
+
+#: keystone/common/cache/core.py:100
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:99
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "Impossible de s'attacher à %(host)s:%(port)s"
+
+#: keystone/common/environment/eventlet_server.py:185
+msgid "Server error"
+msgstr "Erreur serveur"
+
+#: keystone/contrib/endpoint_policy/core.py:129
+#: keystone/contrib/endpoint_policy/core.py:228
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr ""
+"Référence circulaire ou entrée dupliquée trouvée dans l'arbre de la région - "
+"%(region_id)s."
+
+#: keystone/contrib/federation/idp.py:410
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr "Erreur lors de la signature d'une assertion : %(reason)s"
+
+#: keystone/contrib/oauth1/core.py:136
+msgid "Cannot retrieve Authorization headers"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:95
+msgid "in fixed duration looping call"
+msgstr "dans l'appel en boucle de durée fixe"
+
+#: keystone/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr "dans l'appel en boucle dynamique"
+
+#: keystone/openstack/common/service.py:268
+msgid "Unhandled exception"
+msgstr "Exception non gérée"
+
+#: keystone/resource/core.py:477
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/resource/core.py:939
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/token/provider.py:292
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:226
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend.  Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+
+#: keystone/token/providers/common.py:611
+msgid "Failed to validate token"
+msgstr "Echec de validation du token"
+
+#: keystone/token/providers/pki.py:47
+msgid "Unable to sign token"
+msgstr "Impossible de signer le jeton"
+
+#: keystone/token/providers/fernet/utils.py:38
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:79
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-info.po
new file mode 100644 (file)
index 0000000..065540d
--- /dev/null
@@ -0,0 +1,223 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Bruno Cornec <bruno.cornec@hp.com>, 2014
+# Maxime COQUEREL <max.coquerel@gmail.com>, 2014
+# Andrew_Melim <nokostya.translation@gmail.com>, 2014
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-08 17:01+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: French (http://www.transifex.com/projects/p/keystone/language/"
+"fr/)\n"
+"Language: fr\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: keystone/assignment/core.py:250
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr "Création du rôle par défaut %s, car il n'existe pas"
+
+#: keystone/assignment/core.py:258
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr ""
+
+#: keystone/auth/controllers.py:64
+msgid "Loading auth-plugins by class-name is deprecated."
+msgstr "Chargement de auth-plugins par class-name est déprécié"
+
+#: keystone/auth/controllers.py:106
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s.  Will use "
+"the earliest value."
+msgstr ""
+"\"expires_at\" a des valeurs conflictuelles %(existing)s et %(new)s. "
+"Utilsation de la première valeur."
+
+#: keystone/common/openssl.py:81
+#, python-format
+msgid "Running command - %s"
+msgstr "Exécution de la commande %s"
+
+#: keystone/common/wsgi.py:79
+msgid "No bind information present in token"
+msgstr "Aucune information d'attachement n'est présente dans le jeton"
+
+#: keystone/common/wsgi.py:83
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr ""
+"Le mode d'attachement nommé %s n'est pas dans l'information d'attachement"
+
+#: keystone/common/wsgi.py:90
+msgid "Kerberos credentials required and not present"
+msgstr "L'identitification Kerberos est requise mais non présente"
+
+#: keystone/common/wsgi.py:94
+msgid "Kerberos credentials do not match those in bind"
+msgstr "L'identification Kerberos ne correspond pas à celle de l'attachement"
+
+#: keystone/common/wsgi.py:98
+msgid "Kerberos bind authentication successful"
+msgstr "Attachement Kerberos identifié correctement"
+
+#: keystone/common/wsgi.py:105
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr ""
+"Impossible de vérifier l'attachement inconnu:  {%(bind_type)s: "
+"%(identifier)s}"
+
+#: keystone/common/environment/eventlet_server.py:103
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr "Démarrage de %(arg0)s sur %(host)s:%(port)s"
+
+#: keystone/common/kvs/core.py:138
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr "Ahour du mandataire '%(proxy)s' au KVS %(name)s."
+
+#: keystone/common/kvs/core.py:188
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr "Utilise %(func)s comme région KVS %(name)s key_mangler"
+
+#: keystone/common/kvs/core.py:200
+#, python-format
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgstr ""
+"Utilisation du dogpile sha1_mangle_key par défaut comme région KVS %s "
+"key_mangler"
+
+#: keystone/common/kvs/core.py:210
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr "Région KVS %s key_mangler désactivée"
+
+#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
+#, python-format
+msgid ""
+"Received the following notification: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+msgstr ""
+"Réception de la notification suivante: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+
+#: keystone/openstack/common/eventlet_backdoor.py:146
+#, python-format
+msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
+msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d"
+
+#: keystone/openstack/common/service.py:173
+#, python-format
+msgid "Caught %s, exiting"
+msgstr "%s interceptée, sortie"
+
+#: keystone/openstack/common/service.py:231
+msgid "Parent process has died unexpectedly, exiting"
+msgstr "Processus parent arrêté de manière inattendue, sortie"
+
+#: keystone/openstack/common/service.py:262
+#, python-format
+msgid "Child caught %s, exiting"
+msgstr "L'enfant a reçu %s, sortie"
+
+#: keystone/openstack/common/service.py:301
+msgid "Forking too fast, sleeping"
+msgstr "Bifurcation trop rapide, pause"
+
+#: keystone/openstack/common/service.py:320
+#, python-format
+msgid "Started child %d"
+msgstr "Enfant démarré %d"
+
+#: keystone/openstack/common/service.py:330
+#, python-format
+msgid "Starting %d workers"
+msgstr "Démarrage des travailleurs %d"
+
+#: keystone/openstack/common/service.py:347
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr "Enfant %(pid)d arrêté par le signal %(sig)d"
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "Child %(pid)s exited with status %(code)d"
+msgstr "Processus fils %(pid)s terminé avec le status %(code)d"
+
+#: keystone/openstack/common/service.py:390
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr "%s interceptée, arrêt de l'enfant"
+
+#: keystone/openstack/common/service.py:399
+msgid "Wait called after thread killed. Cleaning up."
+msgstr "Pause demandée après suppression de thread. Nettoyage."
+
+#: keystone/openstack/common/service.py:415
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr "En attente %d enfants pour sortie"
+
+#: keystone/token/persistence/backends/sql.py:279
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr "Total des jetons expirés effacés: %d"
+
+#: keystone/token/providers/fernet/utils.py:72
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:130
+#, python-format
+msgid "Created a new key: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:143
+msgid "Key repository is already initialized; aborting."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:179
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:185
+#, python-format
+msgid "Current primary key is: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:187
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:197
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:213
+#, python-format
+msgid "Excess keys to purge: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:237
+#, python-format
+msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-warning.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-warning.po
new file mode 100644 (file)
index 0000000..a83b88a
--- /dev/null
@@ -0,0 +1,303 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Bruno Cornec <bruno.cornec@hp.com>, 2014
+# Maxime COQUEREL <max.coquerel@gmail.com>, 2014
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-19 06:04+0000\n"
+"PO-Revision-Date: 2015-03-19 02:24+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: French (http://www.transifex.com/projects/p/keystone/language/"
+"fr/)\n"
+"Language: fr\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: keystone/cli.py:159
+msgid "keystone-manage pki_setup is not recommended for production use."
+msgstr ""
+"keystone-manage pki_setup n'est pas recommandé pour une utilisation en "
+"production."
+
+#: keystone/cli.py:178
+msgid "keystone-manage ssl_setup is not recommended for production use."
+msgstr ""
+"keystone-manage ssl_setup n'est pas recommandé pour une utilisation en "
+"production."
+
+#: keystone/cli.py:493
+#, python-format
+msgid "Ignoring file (%s) while scanning domain config directory"
+msgstr ""
+
+#: keystone/exception.py:49
+msgid "missing exception kwargs (programmer error)"
+msgstr ""
+
+#: keystone/assignment/controllers.py:60
+#, python-format
+msgid "Authentication failed: %s"
+msgstr "L'authentification a échoué: %s"
+
+#: keystone/assignment/controllers.py:576
+#, python-format
+msgid ""
+"Group %(group)s not found for role-assignment - %(target)s with Role: "
+"%(role)s"
+msgstr ""
+
+#: keystone/auth/controllers.py:449
+#, python-format
+msgid ""
+"User %(user_id)s doesn't have access to default project %(project_id)s. The "
+"token will be unscoped rather than scoped to the project."
+msgstr ""
+
+#: keystone/auth/controllers.py:457
+#, python-format
+msgid ""
+"User %(user_id)s's default project %(project_id)s is disabled. The token "
+"will be unscoped rather than scoped to the project."
+msgstr ""
+
+#: keystone/auth/controllers.py:466
+#, python-format
+msgid ""
+"User %(user_id)s's default project %(project_id)s not found. The token will "
+"be unscoped rather than scoped to the project."
+msgstr ""
+
+#: keystone/common/authorization.py:55
+msgid "RBAC: Invalid user data in token"
+msgstr "RBAC: Donnée utilisation non valide dans le token"
+
+#: keystone/common/controller.py:79 keystone/middleware/core.py:224
+msgid "RBAC: Invalid token"
+msgstr "RBAC : Jeton non valide"
+
+#: keystone/common/controller.py:104 keystone/common/controller.py:201
+#: keystone/common/controller.py:740
+msgid "RBAC: Bypassing authorization"
+msgstr "RBAC : Autorisation ignorée"
+
+#: keystone/common/controller.py:669 keystone/common/controller.py:704
+msgid "Invalid token found while getting domain ID for list request"
+msgstr ""
+
+#: keystone/common/controller.py:677
+msgid "No domain information specified as part of list request"
+msgstr ""
+
+#: keystone/common/utils.py:103
+#, python-format
+msgid "Truncating user password to %d characters."
+msgstr ""
+
+#: keystone/common/wsgi.py:242
+#, python-format
+msgid "Authorization failed. %(exception)s from %(remote_addr)s"
+msgstr "Echec d'autorisation. %(exception)s depuis %(remote_addr)s"
+
+#: keystone/common/wsgi.py:361
+msgid "Invalid token in _get_trust_id_for_request"
+msgstr "Jeton invalide dans _get_trust_id_for_request"
+
+#: keystone/common/cache/backends/mongo.py:403
+#, python-format
+msgid ""
+"TTL index already exists on db collection <%(c_name)s>, remove index <"
+"%(indx_name)s> first to make updated mongo_ttl_seconds value to be  effective"
+msgstr ""
+
+#: keystone/common/kvs/core.py:134
+#, python-format
+msgid "%s is not a dogpile.proxy.ProxyBackend"
+msgstr "%s n'est pas un dogpile.proxy.ProxyBackend"
+
+#: keystone/common/kvs/core.py:403
+#, python-format
+msgid "KVS lock released (timeout reached) for: %s"
+msgstr "Verrou KVS relaché (temps limite atteint) pour : %s"
+
+#: keystone/common/ldap/core.py:1026
+msgid ""
+"LDAP Server does not support paging. Disable paging in keystone.conf to "
+"avoid this message."
+msgstr ""
+"Le serveur LDAP ne prend pas en charge la pagination. Désactivez la "
+"pagination dans keystone.conf pour éviter de recevoir ce message."
+
+#: keystone/common/ldap/core.py:1225
+#, python-format
+msgid ""
+"Invalid additional attribute mapping: \"%s\". Format must be "
+"<ldap_attribute>:<keystone_attribute>"
+msgstr ""
+"Mauvais mappage d'attribut additionnel: \"%s\". Le format doit être "
+"<ldap_attribute>:<keystone_attribute>"
+
+#: keystone/common/ldap/core.py:1336
+#, python-format
+msgid ""
+"ID attribute %(id_attr)s for LDAP object %(dn)s has multiple values and "
+"therefore cannot be used as an ID. Will get the ID from DN instead"
+msgstr ""
+"L'attribut ID %(id_attr)s pour l'objet LDAP %(dn)s a de multiples valeurs et "
+"par conséquent ne peut être utilisé comme un ID. Obtention de l'ID depuis le "
+"DN à la place."
+
+#: keystone/common/ldap/core.py:1669
+#, python-format
+msgid ""
+"When deleting entries for %(search_base)s, could not delete nonexistent "
+"entries %(entries)s%(dots)s"
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:91
+#, python-format
+msgid ""
+"Endpoint %(endpoint_id)s referenced in association for policy %(policy_id)s "
+"not found."
+msgstr ""
+"Le point d'entrée %(endpoint_id)s référencé en association avec la politique "
+"%(policy_id)s est introuvable."
+
+#: keystone/contrib/endpoint_policy/core.py:179
+#, python-format
+msgid ""
+"Unsupported policy association found - Policy %(policy_id)s, Endpoint "
+"%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, "
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:195
+#, python-format
+msgid ""
+"Policy %(policy_id)s referenced in association for endpoint %(endpoint_id)s "
+"not found."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:200
+#, python-format
+msgid "Impossible to identify the IdP %s "
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:523
+msgid "Ignoring user name"
+msgstr ""
+
+#: keystone/identity/controllers.py:139
+#, python-format
+msgid "Unable to remove user %(user)s from %(tenant)s."
+msgstr "Impossible de supprimer l'utilisateur %(user)s depuis %(tenant)s."
+
+#: keystone/identity/controllers.py:158
+#, python-format
+msgid "Unable to add user %(user)s to %(tenant)s."
+msgstr "Impossible d'ajouter l'utilisateur %(user)s à %(tenant)s."
+
+#: keystone/identity/core.py:122
+#, python-format
+msgid "Invalid domain name (%s) found in config file name"
+msgstr "Non de domaine trouvé non valide (%s) dans le fichier de configuration"
+
+#: keystone/identity/core.py:160
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr "Impossible de localiser le répertoire de configuration domaine: %s"
+
+#: keystone/middleware/core.py:149
+msgid ""
+"XML support has been removed as of the Kilo release and should not be "
+"referenced or used in deployment. Please remove references to "
+"XmlBodyMiddleware from your configuration. This compatibility stub will be "
+"removed in the L release"
+msgstr ""
+
+#: keystone/middleware/core.py:234
+msgid "Auth context already exists in the request environment"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:87
+#, python-format
+msgid "task %(func_name)r run outlasted interval by %(delay).2f sec"
+msgstr ""
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "pid %d not in child list"
+msgstr "PID %d absent de la liste d'enfants"
+
+#: keystone/resource/core.py:1214
+#, python-format
+msgid ""
+"Found what looks like an unmatched config option substitution reference - "
+"domain: %(domain)s, group: %(group)s, option: %(option)s, value: %(value)s. "
+"Perhaps the config option to which it refers has yet to be added?"
+msgstr ""
+
+#: keystone/resource/core.py:1221
+#, python-format
+msgid ""
+"Found what looks like an incorrectly constructed config option substitution "
+"reference - domain: %(domain)s, group: %(group)s, option: %(option)s, value: "
+"%(value)s."
+msgstr ""
+
+#: keystone/token/persistence/core.py:228
+#, python-format
+msgid ""
+"`token_api.%s` is deprecated as of Juno in favor of utilizing methods on "
+"`token_provider_api` and may be removed in Kilo."
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:57
+msgid ""
+"It is recommended to only use the base key-value-store implementation for "
+"the token driver for testing purposes. Please use keystone.token.persistence."
+"backends.memcache.Token or keystone.token.persistence.backends.sql.Token "
+"instead."
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:206
+#, python-format
+msgid "Token `%s` is expired, not adding to the revocation list."
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:240
+#, python-format
+msgid ""
+"Removing `%s` from revocation list due to invalid expires data in revocation "
+"list."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:46
+#, python-format
+msgid "[fernet_tokens] key_repository is world readable: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:90
+#, python-format
+msgid ""
+"Unable to change the ownership of [fernet_tokens] key_repository without a "
+"keystone user ID and keystone group ID both being provided: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:112
+#, python-format
+msgid ""
+"Unable to change the ownership of the new key without a keystone user ID and "
+"keystone group ID both being provided: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:204
+msgid ""
+"[fernet_tokens] max_active_keys must be at least 1 to maintain a primary key."
+msgstr ""
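Note on the entries above: every string flagged "#, python-format" is interpolated with named "%(...)s" placeholders after translation, so each msgstr must carry tokens such as %(user)s and %(tenant)s through verbatim, though their order may change per language. A minimal sketch of the runtime lookup with stdlib gettext, assuming the catalogs have been compiled to .mo files under a local "locale/" tree (the path and domain here are illustrative, not taken from this patch):

    import gettext

    # Assumption: msgfmt has produced locale/fr/LC_MESSAGES/keystone.mo;
    # fallback=True degrades to the English msgid when no catalog is found.
    t = gettext.translation('keystone', localedir='locale',
                            languages=['fr'], fallback=True)
    _ = t.gettext

    # Named placeholders survive translation, so argument order is free.
    print(_("Unable to add user %(user)s to %(tenant)s.")
          % {'user': 'alice', 'tenant': 'demo'})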
diff --git a/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..767c150
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Hungarian (http://www.transifex.com/projects/p/keystone/"
+"language/hu/)\n"
+"Language: hu\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "Nem nyitható meg a sablonfájl: %s"
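The headers above ("Generated-By: Babel 1.3") identify Babel as the toolchain behind these catalogs, and Babel can also read them back programmatically. A small sketch, assuming Babel is installed; the inline snippet mirrors the Hungarian entry above:

    from io import StringIO
    from babel.messages.pofile import read_po

    po = StringIO(u'msgid ""\n'
                  u'msgstr ""\n'
                  u'"Plural-Forms: nplurals=2; plural=(n != 1);\\n"\n'
                  u'\n'
                  u'msgid "Unable to open template file %s"\n'
                  u'msgstr "Nem nyitható meg a sablonfájl: %s"\n')
    catalog = read_po(po)
    print(catalog.plural_forms)      # parsed from the header entry
    for message in catalog:
        if message.id:               # skip the header (empty msgid)
            print(message.id, '->', message.string)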
diff --git a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..3501010
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Italian (http://www.transifex.com/projects/p/keystone/"
+"language/it/)\n"
+"Language: it\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "Impossibile aprire il file di template %s"
diff --git a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-error.po
new file mode 100644 (file)
index 0000000..d6ac2cf
--- /dev/null
@@ -0,0 +1,173 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Italian (http://www.transifex.com/projects/p/keystone/"
+"language/it/)\n"
+"Language: it\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/notifications.py:304
+msgid "Failed to construct notifier"
+msgstr ""
+
+#: keystone/notifications.py:389
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/notifications.py:606
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/catalog/core.py:62
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr ""
+
+#: keystone/catalog/core.py:66
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "Endpoint %(url)s non valdio - chiave sconosciuta %(keyerror)s"
+
+#: keystone/catalog/core.py:71
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+
+#: keystone/catalog/core.py:77
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr ""
+
+#: keystone/common/openssl.py:93
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr ""
+
+#: keystone/common/openssl.py:121
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr ""
+
+#: keystone/common/utils.py:239
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+
+#: keystone/common/cache/core.py:100
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:99
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "Impossible fare il bind verso %(host)s:%(port)s"
+
+#: keystone/common/environment/eventlet_server.py:185
+msgid "Server error"
+msgstr "Errore del server"
+
+#: keystone/contrib/endpoint_policy/core.py:129
+#: keystone/contrib/endpoint_policy/core.py:228
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:410
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr ""
+
+#: keystone/contrib/oauth1/core.py:136
+msgid "Cannot retrieve Authorization headers"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:95
+msgid "in fixed duration looping call"
+msgstr "chiamata in loop a durata fissa"
+
+#: keystone/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr "chiamata in loop dinamico"
+
+#: keystone/openstack/common/service.py:268
+msgid "Unhandled exception"
+msgstr "Eccezione non gestita"
+
+#: keystone/resource/core.py:477
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/resource/core.py:939
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/token/provider.py:292
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:226
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend.  Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+
+#: keystone/token/providers/common.py:611
+msgid "Failed to validate token"
+msgstr ""
+
+#: keystone/token/providers/pki.py:47
+msgid "Unable to sign token"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:38
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:79
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-info.po
new file mode 100644 (file)
index 0000000..b88a5de
--- /dev/null
@@ -0,0 +1,211 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Italian (http://www.transifex.com/projects/p/keystone/"
+"language/it/)\n"
+"Language: it\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: keystone/assignment/core.py:250
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr ""
+
+#: keystone/assignment/core.py:258
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr ""
+
+#: keystone/auth/controllers.py:64
+msgid "Loading auth-plugins by class-name is deprecated."
+msgstr ""
+
+#: keystone/auth/controllers.py:106
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s.  Will use "
+"the earliest value."
+msgstr ""
+
+#: keystone/common/openssl.py:81
+#, python-format
+msgid "Running command - %s"
+msgstr ""
+
+#: keystone/common/wsgi.py:79
+msgid "No bind information present in token"
+msgstr ""
+
+#: keystone/common/wsgi.py:83
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr ""
+
+#: keystone/common/wsgi.py:90
+msgid "Kerberos credentials required and not present"
+msgstr ""
+
+#: keystone/common/wsgi.py:94
+msgid "Kerberos credentials do not match those in bind"
+msgstr ""
+
+#: keystone/common/wsgi.py:98
+msgid "Kerberos bind authentication successful"
+msgstr ""
+
+#: keystone/common/wsgi.py:105
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:103
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr "Avvio %(arg0)s in %(host)s:%(port)s"
+
+#: keystone/common/kvs/core.py:138
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr ""
+
+#: keystone/common/kvs/core.py:188
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:200
+#, python-format
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:210
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr ""
+
+#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
+#, python-format
+msgid ""
+"Received the following notification: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+msgstr ""
+
+#: keystone/openstack/common/eventlet_backdoor.py:146
+#, python-format
+msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
+msgstr "Ascolto di eventlet backdoor su %(port)s per il processo %(pid)d"
+
+#: keystone/openstack/common/service.py:173
+#, python-format
+msgid "Caught %s, exiting"
+msgstr "Rilevato %s, esistente"
+
+#: keystone/openstack/common/service.py:231
+msgid "Parent process has died unexpectedly, exiting"
+msgstr "Il processo principale è stato interrotto inaspettatamente, uscire"
+
+#: keystone/openstack/common/service.py:262
+#, python-format
+msgid "Child caught %s, exiting"
+msgstr "Cogliere Child %s, uscendo"
+
+#: keystone/openstack/common/service.py:301
+msgid "Forking too fast, sleeping"
+msgstr "Sblocco troppo veloce, attendere"
+
+#: keystone/openstack/common/service.py:320
+#, python-format
+msgid "Started child %d"
+msgstr "Child avviato %d"
+
+#: keystone/openstack/common/service.py:330
+#, python-format
+msgid "Starting %d workers"
+msgstr "Avvio %d operatori"
+
+#: keystone/openstack/common/service.py:347
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr "Child %(pid)d interrotto dal segnale %(sig)d"
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "Child %(pid)s exited with status %(code)d"
+msgstr "Child %(pid)s terminato con stato %(code)d"
+
+#: keystone/openstack/common/service.py:390
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr "Intercettato %s, arresto in corso dei children"
+
+#: keystone/openstack/common/service.py:399
+msgid "Wait called after thread killed. Cleaning up."
+msgstr ""
+
+#: keystone/openstack/common/service.py:415
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr "In attesa %d degli elementi secondari per uscire"
+
+#: keystone/token/persistence/backends/sql.py:279
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:72
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:130
+#, python-format
+msgid "Created a new key: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:143
+msgid "Key repository is already initialized; aborting."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:179
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:185
+#, python-format
+msgid "Current primary key is: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:187
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:197
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:213
+#, python-format
+msgid "Excess keys to purge: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:237
+#, python-format
+msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgstr ""
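These catalogs are split per severity (keystone-log-critical, -error, -info, -warning, plus the main keystone domain), so each file is loaded as its own gettext domain. A hedged sketch of that layout, assuming compiled .mo files under "locale/" with the domain names used by these file names:

    import gettext

    def level_translator(level, lang):
        # Domain name matches the catalog file names in this patch.
        domain = 'keystone-log-%s' % level
        return gettext.translation(domain, 'locale',
                                   languages=[lang], fallback=True).gettext

    _LI = level_translator('info', 'it')
    # With the Italian catalog installed this prints "Avvio di 4 worker";
    # without it, the English msgid falls through unchanged.
    print(_LI("Starting %d workers") % 4)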
diff --git a/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..b83aaad
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Japanese (http://www.transifex.com/projects/p/keystone/"
+"language/ja/)\n"
+"Language: ja\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "テンプレートファイル %s を開けません"
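Note the Plural-Forms headers: the French and Italian catalogs declare "nplurals=2; plural=(n != 1);" while Japanese declares "nplurals=1; plural=0;", so ngettext evaluates a per-language expression to pick the msgstr index. A short illustration with stdlib gettext; the msgids here are hypothetical, not taken from these files:

    import gettext

    ja = gettext.translation('keystone', 'locale',
                             languages=['ja'], fallback=True)

    # Under "nplurals=1; plural=0;" every count selects msgstr[0]; the
    # English fallback still distinguishes singular from plural.
    for n in (1, 5):
        print(ja.ngettext("%d token removed", "%d tokens removed", n) % n)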
diff --git a/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-error.po
new file mode 100644 (file)
index 0000000..d3e6062
--- /dev/null
@@ -0,0 +1,177 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Kuo(Kyohei MORIYAMA) <>, 2014
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Japanese (http://www.transifex.com/projects/p/keystone/"
+"language/ja/)\n"
+"Language: ja\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: keystone/notifications.py:304
+msgid "Failed to construct notifier"
+msgstr ""
+
+#: keystone/notifications.py:389
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/notifications.py:606
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/catalog/core.py:62
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr ""
+
+#: keystone/catalog/core.py:66
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "不正な形式のエンドポイント %(url)s - 未知のキー %(keyerror)s"
+
+#: keystone/catalog/core.py:71
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+
+#: keystone/catalog/core.py:77
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr ""
+
+#: keystone/common/openssl.py:93
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr ""
+
+#: keystone/common/openssl.py:121
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr ""
+
+#: keystone/common/utils.py:239
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+"デバッグ環境のセットアップ中にエラーが発生しました。オプション --debug-url "
+"が <host>:<port> の形式を持ち、デバッガープロセスがそのポートにおいてリッスン"
+"していることを確認してください。"
+
+#: keystone/common/cache/core.py:100
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:99
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "%(host)s:%(port)s がバインドできません。"
+
+#: keystone/common/environment/eventlet_server.py:185
+msgid "Server error"
+msgstr "内部サーバーエラー"
+
+#: keystone/contrib/endpoint_policy/core.py:129
+#: keystone/contrib/endpoint_policy/core.py:228
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:410
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr "サインアサーション時にエラーが発生しました。理由:%(reason)s"
+
+#: keystone/contrib/oauth1/core.py:136
+msgid "Cannot retrieve Authorization headers"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:95
+msgid "in fixed duration looping call"
+msgstr "一定期間の呼び出しループ"
+
+#: keystone/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr "動的呼び出しループ"
+
+#: keystone/openstack/common/service.py:268
+msgid "Unhandled exception"
+msgstr "未処理例外"
+
+#: keystone/resource/core.py:477
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/resource/core.py:939
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/token/provider.py:292
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:226
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend.  Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+
+#: keystone/token/providers/common.py:611
+msgid "Failed to validate token"
+msgstr ""
+
+#: keystone/token/providers/pki.py:47
+msgid "Unable to sign token"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:38
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:79
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/keystone-log-critical.pot b/keystone-moon/keystone/locale/keystone-log-critical.pot
new file mode 100644 (file)
index 0000000..e07dd7a
--- /dev/null
@@ -0,0 +1,24 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2014.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: keystone 2014.2.dev28.g7e410ae\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr ""
+
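Unlike the language catalogs, the .pot templates keep placeholder metadata (PO-Revision-Date "YEAR-MO-DA", "FULL NAME <EMAIL@ADDRESS>") and carry a "#, fuzzy" flag on the header entry, so compilers treat them as untranslated sources rather than live catalogs. The header itself is simply the msgstr of the empty msgid; a hedged sketch of reading it (domain and path assumed, not part of this patch):

    import gettext

    t = gettext.translation('keystone-log-critical', 'locale',
                            languages=['it'], fallback=True)
    header = t.gettext('')      # the empty msgid maps to the metadata block
    meta = dict(line.split(': ', 1)
                for line in header.splitlines() if ': ' in line)
    # gettext also pre-parses this block into t.info().
    print(meta.get('Language'), meta.get('Plural-Forms'))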
diff --git a/keystone-moon/keystone/locale/keystone-log-error.pot b/keystone-moon/keystone/locale/keystone-log-error.pot
new file mode 100644 (file)
index 0000000..bca25a1
--- /dev/null
@@ -0,0 +1,174 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: keystone 2015.1.dev362\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+
+#: keystone/notifications.py:304
+msgid "Failed to construct notifier"
+msgstr ""
+
+#: keystone/notifications.py:389
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/notifications.py:606
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/catalog/core.py:62
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr ""
+
+#: keystone/catalog/core.py:66
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr ""
+
+#: keystone/catalog/core.py:71
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+
+#: keystone/catalog/core.py:77
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type "
+"notifier ?)"
+msgstr ""
+
+#: keystone/common/openssl.py:93
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr ""
+
+#: keystone/common/openssl.py:121
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr ""
+
+#: keystone/common/utils.py:239
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-"
+"url has the format <host>:<port> and that a debugger processes is "
+"listening on that port."
+msgstr ""
+
+#: keystone/common/cache/core.py:100
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:99
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:185
+msgid "Server error"
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:129
+#: keystone/contrib/endpoint_policy/core.py:228
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - "
+"%(region_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:410
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr ""
+
+#: keystone/contrib/oauth1/core.py:136
+msgid "Cannot retrieve Authorization headers"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:95
+msgid "in fixed duration looping call"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr ""
+
+#: keystone/openstack/common/service.py:268
+msgid "Unhandled exception"
+msgstr ""
+
+#: keystone/resource/core.py:477
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/resource/core.py:939
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/token/provider.py:292
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:226
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list "
+"from backend.  Expected `list` type got `%(type)s`. Old revocation list "
+"data: %(list)r"
+msgstr ""
+
+#: keystone/token/providers/common.py:611
+msgid "Failed to validate token"
+msgstr ""
+
+#: keystone/token/providers/pki.py:47
+msgid "Unable to sign token"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:38
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not"
+" have sufficient permission to access it: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:79
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists"
+" or you don't have sufficient permissions to create it"
+msgstr ""
+
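The templates wrap long msgids at different columns than the translated .po files (compare the fernet_tokens entry here with the Italian and Japanese catalogs above), which is harmless: adjacent quoted segments in a PO file concatenate, exactly like implicit string concatenation in Python. A quick check of that equivalence:

    # The two wrappings of the same msgid from this patch are one string.
    wrapped = ("Either [fernet_tokens] key_repository does not exist or Keystone does not"
               " have sufficient permission to access it: %s")
    single = ("Either [fernet_tokens] key_repository does not exist or Keystone "
              "does not have sufficient permission to access it: %s")
    assert wrapped == single
    print(wrapped % '/etc/keystone/fernet-keys')   # illustrative path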
diff --git a/keystone-moon/keystone/locale/keystone-log-info.pot b/keystone-moon/keystone/locale/keystone-log-info.pot
new file mode 100644 (file)
index 0000000..17abd1d
--- /dev/null
@@ -0,0 +1,210 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: keystone 2015.1.dev362\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+
+#: keystone/assignment/core.py:250
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr ""
+
+#: keystone/assignment/core.py:258
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr ""
+
+#: keystone/auth/controllers.py:64
+msgid "Loading auth-plugins by class-name is deprecated."
+msgstr ""
+
+#: keystone/auth/controllers.py:106
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s.  Will use"
+" the earliest value."
+msgstr ""
+
+#: keystone/common/openssl.py:81
+#, python-format
+msgid "Running command - %s"
+msgstr ""
+
+#: keystone/common/wsgi.py:79
+msgid "No bind information present in token"
+msgstr ""
+
+#: keystone/common/wsgi.py:83
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr ""
+
+#: keystone/common/wsgi.py:90
+msgid "Kerberos credentials required and not present"
+msgstr ""
+
+#: keystone/common/wsgi.py:94
+msgid "Kerberos credentials do not match those in bind"
+msgstr ""
+
+#: keystone/common/wsgi.py:98
+msgid "Kerberos bind authentication successful"
+msgstr ""
+
+#: keystone/common/wsgi.py:105
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:103
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:138
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr ""
+
+#: keystone/common/kvs/core.py:188
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:200
+#, python-format
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:210
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr ""
+
+#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
+#, python-format
+msgid ""
+"Received the following notification: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+msgstr ""
+
+#: keystone/openstack/common/eventlet_backdoor.py:146
+#, python-format
+msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
+msgstr ""
+
+#: keystone/openstack/common/service.py:173
+#, python-format
+msgid "Caught %s, exiting"
+msgstr ""
+
+#: keystone/openstack/common/service.py:231
+msgid "Parent process has died unexpectedly, exiting"
+msgstr ""
+
+#: keystone/openstack/common/service.py:262
+#, python-format
+msgid "Child caught %s, exiting"
+msgstr ""
+
+#: keystone/openstack/common/service.py:301
+msgid "Forking too fast, sleeping"
+msgstr ""
+
+#: keystone/openstack/common/service.py:320
+#, python-format
+msgid "Started child %d"
+msgstr ""
+
+#: keystone/openstack/common/service.py:330
+#, python-format
+msgid "Starting %d workers"
+msgstr ""
+
+#: keystone/openstack/common/service.py:347
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr ""
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "Child %(pid)s exited with status %(code)d"
+msgstr ""
+
+#: keystone/openstack/common/service.py:390
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr ""
+
+#: keystone/openstack/common/service.py:399
+msgid "Wait called after thread killed. Cleaning up."
+msgstr ""
+
+#: keystone/openstack/common/service.py:415
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr ""
+
+#: keystone/token/persistence/backends/sql.py:279
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:72
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:130
+#, python-format
+msgid "Created a new key: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:143
+msgid "Key repository is already initialized; aborting."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:179
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:185
+#, python-format
+msgid "Current primary key is: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:187
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:197
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:213
+#, python-format
+msgid "Excess keys to purge: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:237
+#, python-format
+msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgstr ""
+
diff --git a/keystone-moon/keystone/locale/keystone-log-warning.pot b/keystone-moon/keystone/locale/keystone-log-warning.pot
new file mode 100644 (file)
index 0000000..ddf2931
--- /dev/null
@@ -0,0 +1,290 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: keystone 2015.1.dev497\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-19 06:04+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+
+#: keystone/cli.py:159
+msgid "keystone-manage pki_setup is not recommended for production use."
+msgstr ""
+
+#: keystone/cli.py:178
+msgid "keystone-manage ssl_setup is not recommended for production use."
+msgstr ""
+
+#: keystone/cli.py:493
+#, python-format
+msgid "Ignoring file (%s) while scanning domain config directory"
+msgstr ""
+
+#: keystone/exception.py:49
+msgid "missing exception kwargs (programmer error)"
+msgstr ""
+
+#: keystone/assignment/controllers.py:60
+#, python-format
+msgid "Authentication failed: %s"
+msgstr ""
+
+#: keystone/assignment/controllers.py:576
+#, python-format
+msgid ""
+"Group %(group)s not found for role-assignment - %(target)s with Role: "
+"%(role)s"
+msgstr ""
+
+#: keystone/auth/controllers.py:449
+#, python-format
+msgid ""
+"User %(user_id)s doesn't have access to default project %(project_id)s. "
+"The token will be unscoped rather than scoped to the project."
+msgstr ""
+
+#: keystone/auth/controllers.py:457
+#, python-format
+msgid ""
+"User %(user_id)s's default project %(project_id)s is disabled. The token "
+"will be unscoped rather than scoped to the project."
+msgstr ""
+
+#: keystone/auth/controllers.py:466
+#, python-format
+msgid ""
+"User %(user_id)s's default project %(project_id)s not found. The token "
+"will be unscoped rather than scoped to the project."
+msgstr ""
+
+#: keystone/common/authorization.py:55
+msgid "RBAC: Invalid user data in token"
+msgstr ""
+
+#: keystone/common/controller.py:79 keystone/middleware/core.py:224
+msgid "RBAC: Invalid token"
+msgstr ""
+
+#: keystone/common/controller.py:104 keystone/common/controller.py:201
+#: keystone/common/controller.py:740
+msgid "RBAC: Bypassing authorization"
+msgstr ""
+
+#: keystone/common/controller.py:669 keystone/common/controller.py:704
+msgid "Invalid token found while getting domain ID for list request"
+msgstr ""
+
+#: keystone/common/controller.py:677
+msgid "No domain information specified as part of list request"
+msgstr ""
+
+#: keystone/common/utils.py:103
+#, python-format
+msgid "Truncating user password to %d characters."
+msgstr ""
+
+#: keystone/common/wsgi.py:242
+#, python-format
+msgid "Authorization failed. %(exception)s from %(remote_addr)s"
+msgstr ""
+
+#: keystone/common/wsgi.py:361
+msgid "Invalid token in _get_trust_id_for_request"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:403
+#, python-format
+msgid ""
+"TTL index already exists on db collection <%(c_name)s>, remove index "
+"<%(indx_name)s> first to make updated mongo_ttl_seconds value to be  "
+"effective"
+msgstr ""
+
+#: keystone/common/kvs/core.py:134
+#, python-format
+msgid "%s is not a dogpile.proxy.ProxyBackend"
+msgstr ""
+
+#: keystone/common/kvs/core.py:403
+#, python-format
+msgid "KVS lock released (timeout reached) for: %s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1026
+msgid ""
+"LDAP Server does not support paging. Disable paging in keystone.conf to "
+"avoid this message."
+msgstr ""
+
+#: keystone/common/ldap/core.py:1225
+#, python-format
+msgid ""
+"Invalid additional attribute mapping: \"%s\". Format must be "
+"<ldap_attribute>:<keystone_attribute>"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1336
+#, python-format
+msgid ""
+"ID attribute %(id_attr)s for LDAP object %(dn)s has multiple values and "
+"therefore cannot be used as an ID. Will get the ID from DN instead"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1669
+#, python-format
+msgid ""
+"When deleting entries for %(search_base)s, could not delete nonexistent "
+"entries %(entries)s%(dots)s"
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:91
+#, python-format
+msgid ""
+"Endpoint %(endpoint_id)s referenced in association for policy "
+"%(policy_id)s not found."
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:179
+#, python-format
+msgid ""
+"Unsupported policy association found - Policy %(policy_id)s, Endpoint "
+"%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, "
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:195
+#, python-format
+msgid ""
+"Policy %(policy_id)s referenced in association for endpoint "
+"%(endpoint_id)s not found."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:200
+#, python-format
+msgid "Impossible to identify the IdP %s "
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:523
+msgid "Ignoring user name"
+msgstr ""
+
+#: keystone/identity/controllers.py:139
+#, python-format
+msgid "Unable to remove user %(user)s from %(tenant)s."
+msgstr ""
+
+#: keystone/identity/controllers.py:158
+#, python-format
+msgid "Unable to add user %(user)s to %(tenant)s."
+msgstr ""
+
+#: keystone/identity/core.py:122
+#, python-format
+msgid "Invalid domain name (%s) found in config file name"
+msgstr ""
+
+#: keystone/identity/core.py:160
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr ""
+
+#: keystone/middleware/core.py:149
+msgid ""
+"XML support has been removed as of the Kilo release and should not be "
+"referenced or used in deployment. Please remove references to "
+"XmlBodyMiddleware from your configuration. This compatibility stub will "
+"be removed in the L release"
+msgstr ""
+
+#: keystone/middleware/core.py:234
+msgid "Auth context already exists in the request environment"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:87
+#, python-format
+msgid "task %(func_name)r run outlasted interval by %(delay).2f sec"
+msgstr ""
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "pid %d not in child list"
+msgstr ""
+
+#: keystone/resource/core.py:1214
+#, python-format
+msgid ""
+"Found what looks like an unmatched config option substitution reference -"
+" domain: %(domain)s, group: %(group)s, option: %(option)s, value: "
+"%(value)s. Perhaps the config option to which it refers has yet to be "
+"added?"
+msgstr ""
+
+#: keystone/resource/core.py:1221
+#, python-format
+msgid ""
+"Found what looks like an incorrectly constructed config option "
+"substitution reference - domain: %(domain)s, group: %(group)s, option: "
+"%(option)s, value: %(value)s."
+msgstr ""
+
+#: keystone/token/persistence/core.py:228
+#, python-format
+msgid ""
+"`token_api.%s` is deprecated as of Juno in favor of utilizing methods on "
+"`token_provider_api` and may be removed in Kilo."
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:57
+msgid ""
+"It is recommended to only use the base key-value-store implementation for"
+" the token driver for testing purposes. Please use "
+"keystone.token.persistence.backends.memcache.Token or "
+"keystone.token.persistence.backends.sql.Token instead."
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:206
+#, python-format
+msgid "Token `%s` is expired, not adding to the revocation list."
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:240
+#, python-format
+msgid ""
+"Removing `%s` from revocation list due to invalid expires data in "
+"revocation list."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:46
+#, python-format
+msgid "[fernet_tokens] key_repository is world readable: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:90
+#, python-format
+msgid ""
+"Unable to change the ownership of [fernet_tokens] key_repository without "
+"a keystone user ID and keystone group ID both being provided: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:112
+#, python-format
+msgid ""
+"Unable to change the ownership of the new key without a keystone user ID "
+"and keystone group ID both being provided: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:204
+msgid ""
+"[fernet_tokens] max_active_keys must be at least 1 to maintain a primary "
+"key."
+msgstr ""
+
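The main keystone.pot below gathers user-facing strings, many of them exception texts from keystone/exception.py such as "Could not find role: %(role_id)s". A hedged sketch of the pattern those entries suggest, with a class-level format string interpolated from kwargs at raise time (class names here are illustrative, not Keystone's exact hierarchy):

    class NotFound(Exception):
        message_format = "Could not find: %(target)s"

        def __init__(self, **kwargs):
            super(NotFound, self).__init__(self.message_format % kwargs)

    class RoleNotFound(NotFound):
        message_format = "Could not find role: %(role_id)s"

    try:
        raise RoleNotFound(role_id='admin')
    except NotFound as exc:
        print(exc)   # Could not find role: admin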
diff --git a/keystone-moon/keystone/locale/keystone.pot b/keystone-moon/keystone/locale/keystone.pot
new file mode 100644 (file)
index 0000000..df46fa7
--- /dev/null
@@ -0,0 +1,1522 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: keystone 2015.1.dev497\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-19 06:03+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+
+#: keystone/clean.py:24
+#, python-format
+msgid "%s cannot be empty."
+msgstr ""
+
+#: keystone/clean.py:26
+#, python-format
+msgid "%(property_name)s cannot be less than %(min_length)s characters."
+msgstr ""
+
+#: keystone/clean.py:31
+#, python-format
+msgid "%(property_name)s should not be greater than %(max_length)s characters."
+msgstr ""
+
+#: keystone/clean.py:40
+#, python-format
+msgid "%(property_name)s is not a %(display_expected_type)s"
+msgstr ""
+
+#: keystone/cli.py:283
+msgid "At least one option must be provided"
+msgstr ""
+
+#: keystone/cli.py:290
+msgid "--all option cannot be mixed with other options"
+msgstr ""
+
+#: keystone/cli.py:301
+#, python-format
+msgid "Unknown domain '%(name)s' specified by --domain-name"
+msgstr ""
+
+#: keystone/cli.py:365 keystone/tests/unit/test_cli.py:213
+msgid "At least one option must be provided, use either --all or --domain-name"
+msgstr ""
+
+#: keystone/cli.py:371 keystone/tests/unit/test_cli.py:229
+msgid "The --all option cannot be used with the --domain-name option"
+msgstr ""
+
+#: keystone/cli.py:397 keystone/tests/unit/test_cli.py:246
+#, python-format
+msgid ""
+"Invalid domain name: %(domain)s found in config file name: %(file)s - "
+"ignoring this file."
+msgstr ""
+
+#: keystone/cli.py:405 keystone/tests/unit/test_cli.py:187
+#, python-format
+msgid ""
+"Domain: %(domain)s already has a configuration defined - ignoring file: "
+"%(file)s."
+msgstr ""
+
+#: keystone/cli.py:419
+#, python-format
+msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s."
+msgstr ""
+
+#: keystone/cli.py:452
+#, python-format
+msgid ""
+"To get a more detailed information on this error, re-run this command for"
+" the specific domain, i.e.: keystone-manage domain_config_upload "
+"--domain-name %s"
+msgstr ""
+
+#: keystone/cli.py:470
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr ""
+
+#: keystone/cli.py:503
+msgid ""
+"Unable to access the keystone database, please check it is configured "
+"correctly."
+msgstr ""
+
+#: keystone/exception.py:79
+#, python-format
+msgid ""
+"Expecting to find %(attribute)s in %(target)s - the server could not "
+"comply with the request since it is either malformed or otherwise "
+"incorrect. The client is assumed to be in error."
+msgstr ""
+
+#: keystone/exception.py:90
+#, python-format
+msgid "%(detail)s"
+msgstr ""
+
+#: keystone/exception.py:94
+msgid ""
+"Timestamp not in expected format. The server could not comply with the "
+"request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+
+#: keystone/exception.py:103
+#, python-format
+msgid ""
+"String length exceeded.The length of string '%(string)s' exceeded the "
+"limit of column %(type)s(CHAR(%(length)d))."
+msgstr ""
+
+#: keystone/exception.py:109
+#, python-format
+msgid ""
+"Request attribute %(attribute)s must be less than or equal to %(size)i. "
+"The server could not comply with the request because the attribute size "
+"is invalid (too large). The client is assumed to be in error."
+msgstr ""
+
+#: keystone/exception.py:119
+#, python-format
+msgid ""
+"The specified parent region %(parent_region_id)s would create a circular "
+"region hierarchy."
+msgstr ""
+
+#: keystone/exception.py:126
+#, python-format
+msgid ""
+"The password length must be less than or equal to %(size)i. The server "
+"could not comply with the request because the password is invalid."
+msgstr ""
+
+#: keystone/exception.py:134
+#, python-format
+msgid ""
+"Unable to delete region %(region_id)s because it or its child regions "
+"have associated endpoints."
+msgstr ""
+
+#: keystone/exception.py:141
+msgid ""
+"The certificates you requested are not available. It is likely that this "
+"server does not use PKI tokens otherwise this is the result of "
+"misconfiguration."
+msgstr ""
+
+#: keystone/exception.py:150
+msgid "(Disable debug mode to suppress these details.)"
+msgstr ""
+
+#: keystone/exception.py:155
+#, python-format
+msgid "%(message)s %(amendment)s"
+msgstr ""
+
+#: keystone/exception.py:163
+msgid "The request you have made requires authentication."
+msgstr ""
+
+#: keystone/exception.py:169
+msgid "Authentication plugin error."
+msgstr ""
+
+#: keystone/exception.py:177
+#, python-format
+msgid "Unable to find valid groups while using mapping %(mapping_id)s"
+msgstr ""
+
+#: keystone/exception.py:182
+msgid "Attempted to authenticate with an unsupported method."
+msgstr ""
+
+#: keystone/exception.py:190
+msgid "Additional authentications steps required."
+msgstr ""
+
+#: keystone/exception.py:198
+msgid "You are not authorized to perform the requested action."
+msgstr ""
+
+#: keystone/exception.py:205
+#, python-format
+msgid "You are not authorized to perform the requested action: %(action)s"
+msgstr ""
+
+#: keystone/exception.py:210
+#, python-format
+msgid ""
+"Could not change immutable attribute(s) '%(attributes)s' in target "
+"%(target)s"
+msgstr ""
+
+#: keystone/exception.py:215
+#, python-format
+msgid ""
+"Group membership across backend boundaries is not allowed, group in "
+"question is %(group_id)s, user is %(user_id)s"
+msgstr ""
+
+#: keystone/exception.py:221
+#, python-format
+msgid ""
+"Invalid mix of entities for policy association - only Endpoint, Service "
+"or Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, "
+"Service: %(service_id)s, Region: %(region_id)s"
+msgstr ""
+
+#: keystone/exception.py:228
+#, python-format
+msgid "Invalid domain specific configuration: %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:232
+#, python-format
+msgid "Could not find: %(target)s"
+msgstr ""
+
+#: keystone/exception.py:238
+#, python-format
+msgid "Could not find endpoint: %(endpoint_id)s"
+msgstr ""
+
+#: keystone/exception.py:245
+msgid "An unhandled exception has occurred: Could not find metadata."
+msgstr ""
+
+#: keystone/exception.py:250
+#, python-format
+msgid "Could not find policy: %(policy_id)s"
+msgstr ""
+
+#: keystone/exception.py:254
+msgid "Could not find policy association"
+msgstr ""
+
+#: keystone/exception.py:258
+#, python-format
+msgid "Could not find role: %(role_id)s"
+msgstr ""
+
+#: keystone/exception.py:262
+#, python-format
+msgid ""
+"Could not find role assignment with role: %(role_id)s, user or group: "
+"%(actor_id)s, project or domain: %(target_id)s"
+msgstr ""
+
+#: keystone/exception.py:268
+#, python-format
+msgid "Could not find region: %(region_id)s"
+msgstr ""
+
+#: keystone/exception.py:272
+#, python-format
+msgid "Could not find service: %(service_id)s"
+msgstr ""
+
+#: keystone/exception.py:276
+#, python-format
+msgid "Could not find domain: %(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:280
+#, python-format
+msgid "Could not find project: %(project_id)s"
+msgstr ""
+
+#: keystone/exception.py:284
+#, python-format
+msgid "Cannot create project with parent: %(project_id)s"
+msgstr ""
+
+#: keystone/exception.py:288
+#, python-format
+msgid "Could not find token: %(token_id)s"
+msgstr ""
+
+#: keystone/exception.py:292
+#, python-format
+msgid "Could not find user: %(user_id)s"
+msgstr ""
+
+#: keystone/exception.py:296
+#, python-format
+msgid "Could not find group: %(group_id)s"
+msgstr ""
+
+#: keystone/exception.py:300
+#, python-format
+msgid "Could not find mapping: %(mapping_id)s"
+msgstr ""
+
+#: keystone/exception.py:304
+#, python-format
+msgid "Could not find trust: %(trust_id)s"
+msgstr ""
+
+#: keystone/exception.py:308
+#, python-format
+msgid "No remaining uses for trust: %(trust_id)s"
+msgstr ""
+
+#: keystone/exception.py:312
+#, python-format
+msgid "Could not find credential: %(credential_id)s"
+msgstr ""
+
+#: keystone/exception.py:316
+#, python-format
+msgid "Could not find version: %(version)s"
+msgstr ""
+
+#: keystone/exception.py:320
+#, python-format
+msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
+msgstr ""
+
+#: keystone/exception.py:324
+#, python-format
+msgid "Could not find Identity Provider: %(idp_id)s"
+msgstr ""
+
+#: keystone/exception.py:328
+#, python-format
+msgid "Could not find Service Provider: %(sp_id)s"
+msgstr ""
+
+#: keystone/exception.py:332
+#, python-format
+msgid ""
+"Could not find federated protocol %(protocol_id)s for Identity Provider: "
+"%(idp_id)s"
+msgstr ""
+
+#: keystone/exception.py:343
+#, python-format
+msgid ""
+"Could not find %(group_or_option)s in domain configuration for domain "
+"%(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:348
+#, python-format
+msgid "Conflict occurred attempting to store %(type)s - %(details)s"
+msgstr ""
+
+#: keystone/exception.py:356
+msgid "An unexpected error prevented the server from fulfilling your request."
+msgstr ""
+
+#: keystone/exception.py:359
+#, python-format
+msgid ""
+"An unexpected error prevented the server from fulfilling your request: "
+"%(exception)s"
+msgstr ""
+
+#: keystone/exception.py:382
+#, python-format
+msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
+msgstr ""
+
+#: keystone/exception.py:387
+msgid ""
+"Expected signing certificates are not available on the server. Please "
+"check Keystone configuration."
+msgstr ""
+
+#: keystone/exception.py:393
+#, python-format
+msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
+msgstr ""
+
+#: keystone/exception.py:398
+#, python-format
+msgid ""
+"Group %(group_id)s returned by mapping %(mapping_id)s was not found in "
+"the backend."
+msgstr ""
+
+#: keystone/exception.py:403
+#, python-format
+msgid "Error while reading metadata file, %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:407
+#, python-format
+msgid ""
+"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
+"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:414
+msgid "The action you have requested has not been implemented."
+msgstr ""
+
+#: keystone/exception.py:421
+msgid "The service you have requested is no longer available on this server."
+msgstr ""
+
+#: keystone/exception.py:428
+#, python-format
+msgid "The Keystone configuration file %(config_file)s could not be found."
+msgstr ""
+
+#: keystone/exception.py:433
+msgid ""
+"No encryption keys found; run keystone-manage fernet_setup to bootstrap "
+"one."
+msgstr ""
+
+#: keystone/exception.py:438
+#, python-format
+msgid ""
+"The Keystone domain-specific configuration has specified more than one "
+"SQL driver (only one is permitted): %(source)s."
+msgstr ""
+
+#: keystone/exception.py:445
+#, python-format
+msgid ""
+"%(mod_name)s doesn't provide database migrations. The migration "
+"repository path at %(path)s doesn't exist or isn't a directory."
+msgstr ""
+
+#: keystone/exception.py:457
+#, python-format
+msgid ""
+"Unable to sign SAML assertion. It is likely that this server does not "
+"have xmlsec1 installed, or this is the result of misconfiguration. Reason"
+" %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:465
+msgid ""
+"No Authorization headers found, cannot proceed with OAuth related calls, "
+"if running under HTTPd or Apache, ensure WSGIPassAuthorization is set to "
+"On."
+msgstr ""
+
+#: keystone/notifications.py:250
+#, python-format
+msgid "%(event)s is not a valid notification event, must be one of: %(actions)s"
+msgstr ""
+
+#: keystone/notifications.py:259
+#, python-format
+msgid "Method not callable: %s"
+msgstr ""
+
+#: keystone/assignment/controllers.py:107 keystone/identity/controllers.py:69
+#: keystone/resource/controllers.py:78
+msgid "Name field is required and cannot be empty"
+msgstr ""
+
+#: keystone/assignment/controllers.py:330
+#: keystone/assignment/controllers.py:753
+msgid "Specify a domain or project, not both"
+msgstr ""
+
+#: keystone/assignment/controllers.py:333
+msgid "Specify one of domain or project"
+msgstr ""
+
+#: keystone/assignment/controllers.py:338
+#: keystone/assignment/controllers.py:758
+msgid "Specify a user or group, not both"
+msgstr ""
+
+#: keystone/assignment/controllers.py:341
+msgid "Specify one of user or group"
+msgstr ""
+
+#: keystone/assignment/controllers.py:742
+msgid "Combining effective and group filter will always result in an empty list."
+msgstr ""
+
+#: keystone/assignment/controllers.py:747
+msgid ""
+"Combining effective, domain and inherited filters will always result in "
+"an empty list."
+msgstr ""
+
+#: keystone/assignment/core.py:228
+msgid "Must specify either domain or project"
+msgstr ""
+
+#: keystone/assignment/core.py:493
+#, python-format
+msgid "Project (%s)"
+msgstr ""
+
+#: keystone/assignment/core.py:495
+#, python-format
+msgid "Domain (%s)"
+msgstr ""
+
+#: keystone/assignment/core.py:497
+msgid "Unknown Target"
+msgstr ""
+
+#: keystone/assignment/backends/ldap.py:92
+msgid "Domain metadata not supported by LDAP"
+msgstr ""
+
+#: keystone/assignment/backends/ldap.py:381
+#, python-format
+msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
+msgstr ""
+
+#: keystone/assignment/backends/ldap.py:387
+#, python-format
+msgid "Role %s not found"
+msgstr ""
+
+#: keystone/assignment/backends/ldap.py:402
+#: keystone/assignment/backends/sql.py:335
+#, python-format
+msgid "Cannot remove role that has not been granted, %s"
+msgstr ""
+
+#: keystone/assignment/backends/sql.py:356
+#, python-format
+msgid "Unexpected assignment type encountered, %s"
+msgstr ""
+
+#: keystone/assignment/role_backends/ldap.py:61 keystone/catalog/core.py:103
+#: keystone/common/ldap/core.py:1401 keystone/resource/backends/ldap.py:149
+#, python-format
+msgid "Duplicate ID, %s."
+msgstr ""
+
+#: keystone/assignment/role_backends/ldap.py:69
+#: keystone/common/ldap/core.py:1391
+#, python-format
+msgid "Duplicate name, %s."
+msgstr ""
+
+#: keystone/assignment/role_backends/ldap.py:119
+#, python-format
+msgid "Cannot duplicate name %s"
+msgstr ""
+
+#: keystone/auth/controllers.py:60
+#, python-format
+msgid ""
+"Cannot load an auth-plugin by class-name without a \"method\" attribute "
+"defined: %s"
+msgstr ""
+
+#: keystone/auth/controllers.py:71
+#, python-format
+msgid ""
+"Auth plugin %(plugin)s is requesting previously registered method "
+"%(method)s"
+msgstr ""
+
+#: keystone/auth/controllers.py:115
+#, python-format
+msgid ""
+"Unable to reconcile identity attribute %(attribute)s as it has "
+"conflicting values %(new)s and %(old)s"
+msgstr ""
+
+#: keystone/auth/controllers.py:336
+msgid "Scoping to both domain and project is not allowed"
+msgstr ""
+
+#: keystone/auth/controllers.py:339
+msgid "Scoping to both domain and trust is not allowed"
+msgstr ""
+
+#: keystone/auth/controllers.py:342
+msgid "Scoping to both project and trust is not allowed"
+msgstr ""
+
+#: keystone/auth/controllers.py:512
+msgid "User not found"
+msgstr ""
+
+#: keystone/auth/controllers.py:616
+msgid "A project-scoped token is required to produce a service catalog."
+msgstr ""
+
+#: keystone/auth/plugins/external.py:46
+msgid "No authenticated user"
+msgstr ""
+
+#: keystone/auth/plugins/external.py:56
+#, python-format
+msgid "Unable to lookup user %s"
+msgstr ""
+
+#: keystone/auth/plugins/external.py:107
+msgid "auth_type is not Negotiate"
+msgstr ""
+
+#: keystone/auth/plugins/mapped.py:244
+msgid "Could not map user"
+msgstr ""
+
+#: keystone/auth/plugins/oauth1.py:39
+#, python-format
+msgid "%s not supported"
+msgstr ""
+
+#: keystone/auth/plugins/oauth1.py:57
+msgid "Access token is expired"
+msgstr ""
+
+#: keystone/auth/plugins/oauth1.py:71
+msgid "Could not validate the access token"
+msgstr ""
+
+#: keystone/auth/plugins/password.py:46
+msgid "Invalid username or password"
+msgstr ""
+
+#: keystone/auth/plugins/token.py:72 keystone/token/controllers.py:160
+msgid "rescope a scoped token"
+msgstr ""
+
+#: keystone/catalog/controllers.py:168
+#, python-format
+msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
+msgstr ""
+
+#: keystone/common/authorization.py:47 keystone/common/wsgi.py:64
+#, python-format
+msgid "token reference must be a KeystoneToken type, got: %s"
+msgstr ""
+
+#: keystone/common/base64utils.py:66
+msgid "pad must be single character"
+msgstr ""
+
+#: keystone/common/base64utils.py:215
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
+msgstr ""
+
+#: keystone/common/base64utils.py:219
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgstr ""
+
+#: keystone/common/base64utils.py:225
+#, python-format
+msgid "text is not a multiple of 4, but contains pad \"%s\""
+msgstr ""
+
+#: keystone/common/base64utils.py:244 keystone/common/base64utils.py:265
+msgid "padded base64url text must be multiple of 4 characters"
+msgstr ""
+
+#: keystone/common/controller.py:237 keystone/token/providers/common.py:589
+msgid "Non-default domain is not supported"
+msgstr ""
+
+#: keystone/common/controller.py:305 keystone/identity/core.py:428
+#: keystone/resource/core.py:761 keystone/resource/backends/ldap.py:61
+#, python-format
+msgid "Expected dict or list: %s"
+msgstr ""
+
+#: keystone/common/controller.py:318
+msgid "Marker could not be found"
+msgstr ""
+
+#: keystone/common/controller.py:329
+msgid "Invalid limit value"
+msgstr ""
+
+#: keystone/common/controller.py:637
+msgid "Cannot change Domain ID"
+msgstr ""
+
+#: keystone/common/controller.py:666
+msgid "domain_id is required as part of entity"
+msgstr ""
+
+#: keystone/common/controller.py:701
+msgid "A domain-scoped token must be used"
+msgstr ""
+
+#: keystone/common/dependency.py:68
+#, python-format
+msgid "Unregistered dependency: %(name)s for %(targets)s"
+msgstr ""
+
+#: keystone/common/dependency.py:108
+msgid "event_callbacks must be a dict"
+msgstr ""
+
+#: keystone/common/dependency.py:113
+#, python-format
+msgid "event_callbacks[%s] must be a dict"
+msgstr ""
+
+#: keystone/common/pemutils.py:223
+#, python-format
+msgid "unknown pem_type \"%(pem_type)s\", valid types are: %(valid_pem_types)s"
+msgstr ""
+
+#: keystone/common/pemutils.py:242
+#, python-format
+msgid ""
+"unknown pem header \"%(pem_header)s\", valid headers are: "
+"%(valid_pem_headers)s"
+msgstr ""
+
+#: keystone/common/pemutils.py:298
+#, python-format
+msgid "failed to find end matching \"%s\""
+msgstr ""
+
+#: keystone/common/pemutils.py:302
+#, python-format
+msgid ""
+"beginning & end PEM headers do not match (%(begin_pem_header)s!= "
+"%(end_pem_header)s)"
+msgstr ""
+
+#: keystone/common/pemutils.py:377
+#, python-format
+msgid "unknown pem_type: \"%s\""
+msgstr ""
+
+#: keystone/common/pemutils.py:389
+#, python-format
+msgid ""
+"failed to base64 decode %(pem_type)s PEM at position%(position)d: "
+"%(err_msg)s"
+msgstr ""
+
+#: keystone/common/utils.py:164 keystone/credential/controllers.py:44
+msgid "Invalid blob in credential"
+msgstr ""
+
+#: keystone/common/wsgi.py:330
+#, python-format
+msgid "%s field is required and cannot be empty"
+msgstr ""
+
+#: keystone/common/wsgi.py:342
+#, python-format
+msgid "%s field(s) cannot be empty"
+msgstr ""
+
+#: keystone/common/wsgi.py:563
+msgid "The resource could not be found."
+msgstr ""
+
+#: keystone/common/wsgi.py:704
+#, python-format
+msgid "Unexpected status requested for JSON Home response, %s"
+msgstr ""
+
+#: keystone/common/cache/_memcache_pool.py:113
+#, python-format
+msgid "Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
+msgstr ""
+
+#: keystone/common/cache/core.py:132
+msgid "region not type dogpile.cache.CacheRegion"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:231
+msgid "db_hosts value is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:236
+msgid "database db_name is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:241
+msgid "cache_collection name is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:252
+msgid "integer value expected for w (write concern attribute)"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:260
+msgid "replicaset_name required when use_replica is True"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:275
+msgid "integer value expected for mongo_ttl_seconds"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:301
+msgid "no ssl support available"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:310
+#, python-format
+msgid ""
+"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\","
+" \"REQUIRED\""
+msgstr ""
+
+#: keystone/common/kvs/core.py:71
+#, python-format
+msgid "Lock Timeout occurred for key, %(target)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:106
+#, python-format
+msgid "KVS region %s is already configured. Cannot reconfigure."
+msgstr ""
+
+#: keystone/common/kvs/core.py:145
+#, python-format
+msgid "Key Value Store not configured: %s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:198
+msgid "`key_mangler` option must be a function reference"
+msgstr ""
+
+#: keystone/common/kvs/core.py:353
+#, python-format
+msgid "Lock key must match target key: %(lock)s != %(target)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:357
+msgid "Must be called within an active lock context."
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:69
+#, python-format
+msgid "Maximum lock attempts on %s occurred."
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:108
+#, python-format
+msgid ""
+"Backend `%(driver)s` is not a valid memcached backend. Valid drivers: "
+"%(driver_list)s"
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:178
+msgid "`key_mangler` functions must be callable."
+msgstr ""
+
+#: keystone/common/ldap/core.py:191
+#, python-format
+msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:201
+#, python-format
+msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:213
+#, python-format
+msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:588
+msgid "Invalid TLS / LDAPS combination"
+msgstr ""
+
+#: keystone/common/ldap/core.py:593
+#, python-format
+msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
+msgstr ""
+
+#: keystone/common/ldap/core.py:603
+#, python-format
+msgid "tls_cacertfile %s not found or is not a file"
+msgstr ""
+
+#: keystone/common/ldap/core.py:615
+#, python-format
+msgid "tls_cacertdir %s not found or is not a directory"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1326
+#, python-format
+msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1370
+#, python-format
+msgid "LDAP %s create"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1375
+#, python-format
+msgid "LDAP %s update"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1380
+#, python-format
+msgid "LDAP %s delete"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1522
+msgid ""
+"Disabling an entity where the 'enable' attribute is ignored by "
+"configuration."
+msgstr ""
+
+#: keystone/common/ldap/core.py:1533
+#, python-format
+msgid "Cannot change %(option_name)s %(attr)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1620
+#, python-format
+msgid "Member %(member)s is already a member of group %(group)s"
+msgstr ""
+
+#: keystone/common/sql/core.py:219
+msgid ""
+"Cannot truncate a driver call without hints list as first parameter after"
+" self "
+msgstr ""
+
+#: keystone/common/sql/core.py:410
+msgid "Duplicate Entry"
+msgstr ""
+
+#: keystone/common/sql/core.py:426
+#, python-format
+msgid "An unexpected error occurred when trying to store %s"
+msgstr ""
+
+#: keystone/common/sql/migration_helpers.py:187
+#: keystone/common/sql/migration_helpers.py:245
+#, python-format
+msgid "%s extension does not exist."
+msgstr ""
+
+#: keystone/common/validation/validators.py:54
+#, python-format
+msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:318
+msgid "Token belongs to another user"
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:346
+msgid "Credential belongs to another user"
+msgstr ""
+
+#: keystone/contrib/endpoint_filter/backends/sql.py:69
+#, python-format
+msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+msgstr ""
+
+#: keystone/contrib/endpoint_filter/backends/sql.py:180
+msgid "Endpoint Group Project Association not found"
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:258
+#, python-format
+msgid "No policy is associated with endpoint %(endpoint_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:274
+msgid "Missing entity ID from environment"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:282
+msgid "Request must have an origin query parameter"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:292
+#, python-format
+msgid "%(host)s is not a trusted dashboard host"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:333
+msgid "Use a project scoped token when attempting to create a SAML assertion"
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:454
+#, python-format
+msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:521
+msgid "Ensure configuration option idp_entity_id is set."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:524
+msgid "Ensure configuration option idp_sso_endpoint is set."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:544
+msgid ""
+"idp_contact_type must be one of: [technical, other, support, "
+"administrative or billing."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:178
+msgid "Federation token is expired"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:208
+msgid ""
+"Could not find Identity Provider identifier in environment, check "
+"[federation] remote_id_attribute for details."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:213
+msgid ""
+"Incoming identity provider identifier not included among the accepted "
+"identifiers."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:501
+#, python-format
+msgid "User type %s not supported"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:537
+#, python-format
+msgid ""
+"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords "
+"must be specified."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:753
+#, python-format
+msgid "Identity Provider %(idp)s is disabled"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:761
+#, python-format
+msgid "Service Provider %(sp)s is disabled"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:99
+msgid "Cannot change consumer secret"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:131
+msgid "Cannot list request tokens with a token issued via delegation."
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:192
+#: keystone/contrib/oauth1/backends/sql.py:270
+msgid "User IDs do not match"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:199
+msgid "Could not find role"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:248
+msgid "Invalid signature"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:299
+#: keystone/contrib/oauth1/controllers.py:377
+msgid "Request token is expired"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:313
+msgid "There should not be any non-oauth parameters"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:317
+msgid "provided consumer key does not match stored consumer key"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:321
+msgid "provided verifier does not match stored verifier"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:325
+msgid "provided request key does not match stored request key"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:329
+msgid "Request Token does not have an authorizing user id"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:366
+msgid "Cannot authorize a request token with a token issued via delegation."
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:396
+msgid "authorizing user does not have role required"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:409
+msgid "User is not a member of the requested project"
+msgstr ""
+
+#: keystone/contrib/oauth1/backends/sql.py:91
+msgid "Consumer not found"
+msgstr ""
+
+#: keystone/contrib/oauth1/backends/sql.py:186
+msgid "Request token not found"
+msgstr ""
+
+#: keystone/contrib/oauth1/backends/sql.py:250
+msgid "Access token not found"
+msgstr ""
+
+#: keystone/contrib/revoke/controllers.py:33
+#, python-format
+msgid "invalid date format %s"
+msgstr ""
+
+#: keystone/contrib/revoke/core.py:150
+msgid ""
+"The revoke call must not have both domain_id and project_id. This is a "
+"bug in the Keystone server. The current request is aborted."
+msgstr ""
+
+#: keystone/contrib/revoke/core.py:218 keystone/token/provider.py:207
+#: keystone/token/provider.py:230 keystone/token/provider.py:296
+#: keystone/token/provider.py:303
+msgid "Failed to validate token"
+msgstr ""
+
+#: keystone/identity/controllers.py:72
+msgid "Enabled field must be a boolean"
+msgstr ""
+
+#: keystone/identity/controllers.py:98
+msgid "Enabled field should be a boolean"
+msgstr ""
+
+#: keystone/identity/core.py:112
+#, python-format
+msgid "Database at /domains/%s/config"
+msgstr ""
+
+#: keystone/identity/core.py:287 keystone/identity/backends/ldap.py:59
+#: keystone/identity/backends/ldap.py:61 keystone/identity/backends/ldap.py:67
+#: keystone/identity/backends/ldap.py:69 keystone/identity/backends/sql.py:104
+#: keystone/identity/backends/sql.py:106
+msgid "Invalid user / password"
+msgstr ""
+
+#: keystone/identity/core.py:693
+#, python-format
+msgid "User is disabled: %s"
+msgstr ""
+
+#: keystone/identity/core.py:735
+msgid "Cannot change user ID"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:99
+msgid "Cannot change user name"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:188 keystone/identity/backends/sql.py:188
+#: keystone/identity/backends/sql.py:206
+#, python-format
+msgid "User '%(user_id)s' not found in group '%(group_id)s'"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:339
+#, python-format
+msgid "User %(user_id)s is already a member of group %(group_id)s"
+msgstr ""
+
+#: keystone/models/token_model.py:61
+msgid "Found invalid token: scoped to both project and domain."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:108
+#, python-format
+msgid ""
+"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
+"may be removed in %(remove_in)s."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:112
+#, python-format
+msgid ""
+"%(what)s is deprecated as of %(as_of)s and may be removed in "
+"%(remove_in)s. It will not be superseded."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:116
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:119
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:241
+#, python-format
+msgid "Deprecated: %s"
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:259
+#, python-format
+msgid "Fatal call to deprecated config: %(msg)s"
+msgstr ""
+
+#: keystone/resource/controllers.py:231
+msgid ""
+"Cannot use parents_as_list and parents_as_ids query params at the same "
+"time."
+msgstr ""
+
+#: keystone/resource/controllers.py:237
+msgid ""
+"Cannot use subtree_as_list and subtree_as_ids query params at the same "
+"time."
+msgstr ""
+
+#: keystone/resource/core.py:80
+#, python-format
+msgid "max hierarchy depth reached for %s branch."
+msgstr ""
+
+#: keystone/resource/core.py:97
+msgid "cannot create a project within a different domain than its parents."
+msgstr ""
+
+#: keystone/resource/core.py:101
+#, python-format
+msgid "cannot create a project in a branch containing a disabled project: %s"
+msgstr ""
+
+#: keystone/resource/core.py:123
+#, python-format
+msgid "Domain is disabled: %s"
+msgstr ""
+
+#: keystone/resource/core.py:141
+#, python-format
+msgid "Domain cannot be named %s"
+msgstr ""
+
+#: keystone/resource/core.py:144
+#, python-format
+msgid "Domain cannot have ID %s"
+msgstr ""
+
+#: keystone/resource/core.py:156
+#, python-format
+msgid "Project is disabled: %s"
+msgstr ""
+
+#: keystone/resource/core.py:176
+#, python-format
+msgid "cannot enable project %s since it has disabled parents"
+msgstr ""
+
+#: keystone/resource/core.py:184
+#, python-format
+msgid "cannot disable project %s since its subtree contains enabled projects"
+msgstr ""
+
+#: keystone/resource/core.py:195
+msgid "Update of `parent_id` is not allowed."
+msgstr ""
+
+#: keystone/resource/core.py:222
+#, python-format
+msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
+msgstr ""
+
+#: keystone/resource/core.py:376
+msgid "Multiple domains are not supported"
+msgstr ""
+
+#: keystone/resource/core.py:429
+msgid "delete the default domain"
+msgstr ""
+
+#: keystone/resource/core.py:440
+msgid "cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+
+#: keystone/resource/core.py:841
+msgid "No options specified"
+msgstr ""
+
+#: keystone/resource/core.py:847
+#, python-format
+msgid ""
+"The value of group %(group)s specified in the config should be a "
+"dictionary of options"
+msgstr ""
+
+#: keystone/resource/core.py:871
+#, python-format
+msgid ""
+"Option %(option)s found with no group specified while checking domain "
+"configuration request"
+msgstr ""
+
+#: keystone/resource/core.py:878
+#, python-format
+msgid "Group %(group)s is not supported for domain specific configurations"
+msgstr ""
+
+#: keystone/resource/core.py:885
+#, python-format
+msgid ""
+"Option %(option)s in group %(group)s is not supported for domain specific"
+" configurations"
+msgstr ""
+
+#: keystone/resource/core.py:938
+msgid "An unexpected error occurred when retrieving domain configs"
+msgstr ""
+
+#: keystone/resource/core.py:1013 keystone/resource/core.py:1097
+#: keystone/resource/core.py:1167 keystone/resource/config_backends/sql.py:70
+#, python-format
+msgid "option %(option)s in group %(group)s"
+msgstr ""
+
+#: keystone/resource/core.py:1016 keystone/resource/core.py:1102
+#: keystone/resource/core.py:1163
+#, python-format
+msgid "group %(group)s"
+msgstr ""
+
+#: keystone/resource/core.py:1018
+msgid "any options"
+msgstr ""
+
+#: keystone/resource/core.py:1062
+#, python-format
+msgid ""
+"Trying to update option %(option)s in group %(group)s, so that, and only "
+"that, option must be specified  in the config"
+msgstr ""
+
+#: keystone/resource/core.py:1067
+#, python-format
+msgid ""
+"Trying to update group %(group)s, so that, and only that, group must be "
+"specified in the config"
+msgstr ""
+
+#: keystone/resource/core.py:1076
+#, python-format
+msgid ""
+"request to update group %(group)s, but config provided contains group "
+"%(group_other)s instead"
+msgstr ""
+
+#: keystone/resource/core.py:1083
+#, python-format
+msgid ""
+"Trying to update option %(option)s in group %(group)s, but config "
+"provided contains option %(option_other)s instead"
+msgstr ""
+
+#: keystone/resource/backends/ldap.py:151
+#: keystone/resource/backends/ldap.py:159
+#: keystone/resource/backends/ldap.py:163
+msgid "Domains are read-only against LDAP"
+msgstr ""
+
+#: keystone/server/eventlet.py:77
+msgid ""
+"Running keystone via eventlet is deprecated as of Kilo in favor of "
+"running in a WSGI server (e.g. mod_wsgi). Support for keystone under "
+"eventlet will be removed in the \"M\"-Release."
+msgstr ""
+
+#: keystone/server/eventlet.py:90
+#, python-format
+msgid "Failed to start the %(name)s server"
+msgstr ""
+
+#: keystone/token/controllers.py:391
+#, python-format
+msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
+msgstr ""
+
+#: keystone/token/controllers.py:410 keystone/token/controllers.py:413
+msgid "Token does not belong to specified tenant."
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:133
+#, python-format
+msgid "Unknown token version %s"
+msgstr ""
+
+#: keystone/token/providers/common.py:250
+#: keystone/token/providers/common.py:355
+#, python-format
+msgid "User %(user_id)s has no access to project %(project_id)s"
+msgstr ""
+
+#: keystone/token/providers/common.py:255
+#: keystone/token/providers/common.py:360
+#, python-format
+msgid "User %(user_id)s has no access to domain %(domain_id)s"
+msgstr ""
+
+#: keystone/token/providers/common.py:282
+msgid "Trustor is disabled."
+msgstr ""
+
+#: keystone/token/providers/common.py:346
+msgid "Trustee has no delegated roles."
+msgstr ""
+
+#: keystone/token/providers/common.py:407
+#, python-format
+msgid "Invalid audit info data type: %(data)s (%(type)s)"
+msgstr ""
+
+#: keystone/token/providers/common.py:435
+msgid "User is not a trustee."
+msgstr ""
+
+#: keystone/token/providers/common.py:579
+msgid ""
+"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
+"Authentication"
+msgstr ""
+
+#: keystone/token/providers/common.py:597
+msgid "Domain scoped token is not supported"
+msgstr ""
+
+#: keystone/token/providers/pki.py:48 keystone/token/providers/pkiz.py:30
+msgid "Unable to sign token."
+msgstr ""
+
+#: keystone/token/providers/fernet/core.py:210
+msgid ""
+"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
+"tokens."
+msgstr ""
+
+#: keystone/token/providers/fernet/token_formatters.py:189
+#, python-format
+msgid "This is not a recognized Fernet payload version: %s"
+msgstr ""
+
+#: keystone/trust/controllers.py:148
+msgid "Redelegation allowed for delegated by trust only"
+msgstr ""
+
+#: keystone/trust/controllers.py:181
+msgid "The authenticated user should match the trustor."
+msgstr ""
+
+#: keystone/trust/controllers.py:186
+msgid "At least one role should be specified."
+msgstr ""
+
+#: keystone/trust/core.py:57
+#, python-format
+msgid ""
+"Remaining redelegation depth of %(redelegation_depth)d out of allowed "
+"range of [0..%(max_count)d]"
+msgstr ""
+
+#: keystone/trust/core.py:66
+#, python-format
+msgid ""
+"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
+"order to redelegate a trust"
+msgstr ""
+
+#: keystone/trust/core.py:77
+msgid "Requested expiration time is more than redelegated trust can provide"
+msgstr ""
+
+#: keystone/trust/core.py:87
+msgid "Some of requested roles are not in redelegated trust"
+msgstr ""
+
+#: keystone/trust/core.py:116
+msgid "One of the trust agents is disabled or deleted"
+msgstr ""
+
+#: keystone/trust/core.py:135
+msgid "remaining_uses must be a positive integer or null."
+msgstr ""
+
+#: keystone/trust/core.py:141
+#, python-format
+msgid ""
+"Requested redelegation depth of %(requested_count)d is greater than "
+"allowed %(max_count)d"
+msgstr ""
+
+#: keystone/trust/core.py:147
+msgid "remaining_uses must not be set if redelegation is allowed"
+msgstr ""
+
+#: keystone/trust/core.py:157
+msgid ""
+"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting"
+" this parameter is advised."
+msgstr ""
+
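
A note on how the catalogs in this patch fit together: each entry pairs a msgid, extracted from the source files named in the "#:" reference comments, with a msgstr translation, and an empty msgstr simply falls back to the English msgid at runtime. A minimal sketch of the call-site contract, assuming stdlib gettext (the module and function below are hypothetical illustrations, not Keystone code; Keystone itself routes the marker through oslo.i18n):

    import gettext
    _ = gettext.gettext  # hypothetical wiring; Keystone uses oslo.i18n for this

    def validate_name(name):
        # pybabel extract (driven by babel.cfg) collects this literal as the
        # msgid "Name field is required and cannot be empty" seen above.
        if not name:
            raise ValueError(_("Name field is required and cannot be empty"))
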
diff --git a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..b7f255c
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/keystone/"
+"language/ko_KR/)\n"
+"Language: ko_KR\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "템플리트 파일 %s을(를) 열 수 없음"
diff --git a/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..b774906
--- /dev/null
@@ -0,0 +1,26 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Polish (Poland) (http://www.transifex.com/projects/p/keystone/"
+"language/pl_PL/)\n"
+"Language: pl_PL\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 "
+"|| n%100>=20) ? 1 : 2);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "Błąd podczas otwierania pliku %s"
diff --git a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..689a23e
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/"
+"keystone/language/pt_BR/)\n"
+"Language: pt_BR\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "Não é possível abrir o arquivo de modelo %s"
diff --git a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-error.po
new file mode 100644 (file)
index 0000000..5f81b98
--- /dev/null
@@ -0,0 +1,179 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/"
+"keystone/language/pt_BR/)\n"
+"Language: pt_BR\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: keystone/notifications.py:304
+msgid "Failed to construct notifier"
+msgstr ""
+
+#: keystone/notifications.py:389
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr "Falha ao enviar notificação %(res_id)s %(event_type)s"
+
+#: keystone/notifications.py:606
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/catalog/core.py:62
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr ""
+
+#: keystone/catalog/core.py:66
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "Endpoint mal formado %(url)s - chave desconhecida %(keyerror)s"
+
+#: keystone/catalog/core.py:71
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+
+#: keystone/catalog/core.py:77
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr ""
+
+#: keystone/common/openssl.py:93
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr ""
+
+#: keystone/common/openssl.py:121
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr ""
+
+#: keystone/common/utils.py:239
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+"Erro configurando o ambiente de debug. Verifique que a opção --debug-url "
+"possui o formato <host>:<port> e que o processo debugger está escutando "
+"nesta porta."
+
+#: keystone/common/cache/core.py:100
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+"Não é possível construir chave de configuração do cache. Formato esperado "
+"\"<argname>:<value>\".  Pulando formato desconhecido: %s"
+
+#: keystone/common/environment/eventlet_server.py:99
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:185
+msgid "Server error"
+msgstr "Erro do servidor"
+
+#: keystone/contrib/endpoint_policy/core.py:129
+#: keystone/contrib/endpoint_policy/core.py:228
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:410
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr ""
+
+#: keystone/contrib/oauth1/core.py:136
+msgid "Cannot retrieve Authorization headers"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:95
+msgid "in fixed duration looping call"
+msgstr "em uma chamada de laço de duração fixa"
+
+#: keystone/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr "em chamada de laço dinâmico"
+
+#: keystone/openstack/common/service.py:268
+msgid "Unhandled exception"
+msgstr "Exceção não tratada"
+
+#: keystone/resource/core.py:477
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/resource/core.py:939
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/token/provider.py:292
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr ""
+"Erro inesperado ou token mal formado ao determinar validade do token: %s"
+
+#: keystone/token/persistence/backends/kvs.py:226
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend.  Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+
+#: keystone/token/providers/common.py:611
+msgid "Failed to validate token"
+msgstr "Falha ao validar token"
+
+#: keystone/token/providers/pki.py:47
+msgid "Unable to sign token"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:38
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:79
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
new file mode 100644 (file)
index 0000000..fdb771c
--- /dev/null
@@ -0,0 +1,1546 @@
+# Portuguese (Brazil) translations for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Gabriel Wainer, 2013
+# Lucas Ribeiro <lucasribeiro1990@gmail.com>, 2014
+# Volmar Oliveira Junior <volmar.oliveira.jr@gmail.com>, 2013
+msgid ""
+msgstr ""
+"Project-Id-Version:  Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-23 06:04+0000\n"
+"PO-Revision-Date: 2015-03-21 23:03+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Portuguese (Brazil) "
+"(http://www.transifex.com/projects/p/keystone/language/pt_BR/)\n"
+"Plural-Forms: nplurals=2; plural=(n > 1)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+
+#: keystone/clean.py:24
+#, python-format
+msgid "%s cannot be empty."
+msgstr "%s não pode estar vazio."
+
+#: keystone/clean.py:26
+#, python-format
+msgid "%(property_name)s cannot be less than %(min_length)s characters."
+msgstr "%(property_name)s não pode ter menos de %(min_length)s caracteres."
+
+#: keystone/clean.py:31
+#, python-format
+msgid "%(property_name)s should not be greater than %(max_length)s characters."
+msgstr "%(property_name)s não deve ter mais de %(max_length)s caracteres."
+
+#: keystone/clean.py:40
+#, python-format
+msgid "%(property_name)s is not a %(display_expected_type)s"
+msgstr "%(property_name)s não é um %(display_expected_type)s"
+
+#: keystone/cli.py:283
+msgid "At least one option must be provided"
+msgstr ""
+
+#: keystone/cli.py:290
+msgid "--all option cannot be mixed with other options"
+msgstr ""
+
+#: keystone/cli.py:301
+#, python-format
+msgid "Unknown domain '%(name)s' specified by --domain-name"
+msgstr ""
+
+#: keystone/cli.py:365 keystone/tests/unit/test_cli.py:213
+msgid "At least one option must be provided, use either --all or --domain-name"
+msgstr ""
+
+#: keystone/cli.py:371 keystone/tests/unit/test_cli.py:229
+msgid "The --all option cannot be used with the --domain-name option"
+msgstr ""
+
+#: keystone/cli.py:397 keystone/tests/unit/test_cli.py:246
+#, python-format
+msgid ""
+"Invalid domain name: %(domain)s found in config file name: %(file)s - "
+"ignoring this file."
+msgstr ""
+
+#: keystone/cli.py:405 keystone/tests/unit/test_cli.py:187
+#, python-format
+msgid ""
+"Domain: %(domain)s already has a configuration defined - ignoring file: "
+"%(file)s."
+msgstr ""
+
+#: keystone/cli.py:419
+#, python-format
+msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s."
+msgstr ""
+
+#: keystone/cli.py:452
+#, python-format
+msgid ""
+"To get a more detailed information on this error, re-run this command for"
+" the specific domain, i.e.: keystone-manage domain_config_upload "
+"--domain-name %s"
+msgstr ""
+
+#: keystone/cli.py:470
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr "Não é possível localizar diretório de configuração de domínio: %s"
+
+#: keystone/cli.py:503
+msgid ""
+"Unable to access the keystone database, please check it is configured "
+"correctly."
+msgstr ""
+
+#: keystone/exception.py:79
+#, python-format
+msgid ""
+"Expecting to find %(attribute)s in %(target)s - the server could not "
+"comply with the request since it is either malformed or otherwise "
+"incorrect. The client is assumed to be in error."
+msgstr ""
+
+#: keystone/exception.py:90
+#, python-format
+msgid "%(detail)s"
+msgstr ""
+
+#: keystone/exception.py:94
+msgid ""
+"Timestamp not in expected format. The server could not comply with the "
+"request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"A data não está no formato especificado. O servidor não pôde realizar a "
+"requisição pois ela está mal formada ou incorreta. Assume-se que o "
+"cliente está com erro."
+
+#: keystone/exception.py:103
+#, python-format
+msgid ""
+"String length exceeded.The length of string '%(string)s' exceeded the "
+"limit of column %(type)s(CHAR(%(length)d))."
+msgstr ""
+"Comprimento de string excedido. O comprimento de string '%(string)s' "
+"excedeu o limite da coluna %(type)s(CHAR(%(length)d))."
+
+#: keystone/exception.py:109
+#, python-format
+msgid ""
+"Request attribute %(attribute)s must be less than or equal to %(size)i. "
+"The server could not comply with the request because the attribute size "
+"is invalid (too large). The client is assumed to be in error."
+msgstr ""
+"Atributo de requisição %(attribute)s deve ser menor ou igual a %(size)i. "
+"O servidor não pôde atender a requisição porque o tamanho do atributo é "
+"inválido (muito grande). Assume-se que o cliente está em erro."
+
+#: keystone/exception.py:119
+#, python-format
+msgid ""
+"The specified parent region %(parent_region_id)s would create a circular "
+"region hierarchy."
+msgstr ""
+
+#: keystone/exception.py:126
+#, python-format
+msgid ""
+"The password length must be less than or equal to %(size)i. The server "
+"could not comply with the request because the password is invalid."
+msgstr ""
+
+#: keystone/exception.py:134
+#, python-format
+msgid ""
+"Unable to delete region %(region_id)s because it or its child regions "
+"have associated endpoints."
+msgstr ""
+
+#: keystone/exception.py:141
+msgid ""
+"The certificates you requested are not available. It is likely that this "
+"server does not use PKI tokens otherwise this is the result of "
+"misconfiguration."
+msgstr ""
+
+#: keystone/exception.py:150
+msgid "(Disable debug mode to suppress these details.)"
+msgstr ""
+
+#: keystone/exception.py:155
+#, python-format
+msgid "%(message)s %(amendment)s"
+msgstr ""
+
+#: keystone/exception.py:163
+msgid "The request you have made requires authentication."
+msgstr "A requisição que você fez requer autenticação."
+
+#: keystone/exception.py:169
+msgid "Authentication plugin error."
+msgstr "Erro do plugin de autenticação."
+
+#: keystone/exception.py:177
+#, python-format
+msgid "Unable to find valid groups while using mapping %(mapping_id)s"
+msgstr ""
+
+#: keystone/exception.py:182
+msgid "Attempted to authenticate with an unsupported method."
+msgstr "Tentativa de autenticação com um método não suportado."
+
+#: keystone/exception.py:190
+msgid "Additional authentications steps required."
+msgstr "Passos de autenticação adicionais requeridos."
+
+#: keystone/exception.py:198
+msgid "You are not authorized to perform the requested action."
+msgstr "Você não está autorizado à realizar a ação solicitada."
+
+#: keystone/exception.py:205
+#, python-format
+msgid "You are not authorized to perform the requested action: %(action)s"
+msgstr ""
+
+#: keystone/exception.py:210
+#, python-format
+msgid ""
+"Could not change immutable attribute(s) '%(attributes)s' in target "
+"%(target)s"
+msgstr ""
+
+#: keystone/exception.py:215
+#, python-format
+msgid ""
+"Group membership across backend boundaries is not allowed, group in "
+"question is %(group_id)s, user is %(user_id)s"
+msgstr ""
+
+#: keystone/exception.py:221
+#, python-format
+msgid ""
+"Invalid mix of entities for policy association - only Endpoint, Service "
+"or Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, "
+"Service: %(service_id)s, Region: %(region_id)s"
+msgstr ""
+
+#: keystone/exception.py:228
+#, python-format
+msgid "Invalid domain specific configuration: %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:232
+#, python-format
+msgid "Could not find: %(target)s"
+msgstr ""
+
+#: keystone/exception.py:238
+#, python-format
+msgid "Could not find endpoint: %(endpoint_id)s"
+msgstr ""
+
+#: keystone/exception.py:245
+msgid "An unhandled exception has occurred: Could not find metadata."
+msgstr "Uma exceção não tratada ocorreu: Não foi possível encontrar metadados."
+
+#: keystone/exception.py:250
+#, python-format
+msgid "Could not find policy: %(policy_id)s"
+msgstr ""
+
+#: keystone/exception.py:254
+msgid "Could not find policy association"
+msgstr ""
+
+#: keystone/exception.py:258
+#, python-format
+msgid "Could not find role: %(role_id)s"
+msgstr ""
+
+#: keystone/exception.py:262
+#, python-format
+msgid ""
+"Could not find role assignment with role: %(role_id)s, user or group: "
+"%(actor_id)s, project or domain: %(target_id)s"
+msgstr ""
+
+#: keystone/exception.py:268
+#, python-format
+msgid "Could not find region: %(region_id)s"
+msgstr ""
+
+#: keystone/exception.py:272
+#, python-format
+msgid "Could not find service: %(service_id)s"
+msgstr ""
+
+#: keystone/exception.py:276
+#, python-format
+msgid "Could not find domain: %(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:280
+#, python-format
+msgid "Could not find project: %(project_id)s"
+msgstr ""
+
+#: keystone/exception.py:284
+#, python-format
+msgid "Cannot create project with parent: %(project_id)s"
+msgstr ""
+
+#: keystone/exception.py:288
+#, python-format
+msgid "Could not find token: %(token_id)s"
+msgstr ""
+
+#: keystone/exception.py:292
+#, python-format
+msgid "Could not find user: %(user_id)s"
+msgstr ""
+
+#: keystone/exception.py:296
+#, python-format
+msgid "Could not find group: %(group_id)s"
+msgstr ""
+
+#: keystone/exception.py:300
+#, python-format
+msgid "Could not find mapping: %(mapping_id)s"
+msgstr ""
+
+#: keystone/exception.py:304
+#, python-format
+msgid "Could not find trust: %(trust_id)s"
+msgstr ""
+
+#: keystone/exception.py:308
+#, python-format
+msgid "No remaining uses for trust: %(trust_id)s"
+msgstr ""
+
+#: keystone/exception.py:312
+#, python-format
+msgid "Could not find credential: %(credential_id)s"
+msgstr ""
+
+#: keystone/exception.py:316
+#, python-format
+msgid "Could not find version: %(version)s"
+msgstr ""
+
+#: keystone/exception.py:320
+#, python-format
+msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
+msgstr ""
+
+#: keystone/exception.py:324
+#, python-format
+msgid "Could not find Identity Provider: %(idp_id)s"
+msgstr ""
+
+#: keystone/exception.py:328
+#, python-format
+msgid "Could not find Service Provider: %(sp_id)s"
+msgstr ""
+
+#: keystone/exception.py:332
+#, python-format
+msgid ""
+"Could not find federated protocol %(protocol_id)s for Identity Provider: "
+"%(idp_id)s"
+msgstr ""
+
+#: keystone/exception.py:343
+#, python-format
+msgid ""
+"Could not find %(group_or_option)s in domain configuration for domain "
+"%(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:348
+#, python-format
+msgid "Conflict occurred attempting to store %(type)s - %(details)s"
+msgstr ""
+
+#: keystone/exception.py:356
+msgid "An unexpected error prevented the server from fulfilling your request."
+msgstr ""
+
+#: keystone/exception.py:359
+#, python-format
+msgid ""
+"An unexpected error prevented the server from fulfilling your request: "
+"%(exception)s"
+msgstr ""
+
+#: keystone/exception.py:382
+#, python-format
+msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
+msgstr ""
+
+#: keystone/exception.py:387
+msgid ""
+"Expected signing certificates are not available on the server. Please "
+"check Keystone configuration."
+msgstr ""
+
+#: keystone/exception.py:393
+#, python-format
+msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
+msgstr ""
+"URL de endpoint mal-formada (%(endpoint)s), veja o log de ERROS para "
+"detalhes."
+
+#: keystone/exception.py:398
+#, python-format
+msgid ""
+"Group %(group_id)s returned by mapping %(mapping_id)s was not found in "
+"the backend."
+msgstr ""
+
+#: keystone/exception.py:403
+#, python-format
+msgid "Error while reading metadata file, %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:407
+#, python-format
+msgid ""
+"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
+"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
+msgstr ""
+
+#: keystone/exception.py:414
+msgid "The action you have requested has not been implemented."
+msgstr "A ação que você solicitou não foi implementada."
+
+#: keystone/exception.py:421
+msgid "The service you have requested is no longer available on this server."
+msgstr ""
+
+#: keystone/exception.py:428
+#, python-format
+msgid "The Keystone configuration file %(config_file)s could not be found."
+msgstr ""
+
+#: keystone/exception.py:433
+msgid ""
+"No encryption keys found; run keystone-manage fernet_setup to bootstrap "
+"one."
+msgstr ""
+
+#: keystone/exception.py:438
+#, python-format
+msgid ""
+"The Keystone domain-specific configuration has specified more than one "
+"SQL driver (only one is permitted): %(source)s."
+msgstr ""
+
+#: keystone/exception.py:445
+#, python-format
+msgid ""
+"%(mod_name)s doesn't provide database migrations. The migration "
+"repository path at %(path)s doesn't exist or isn't a directory."
+msgstr ""
+
+#: keystone/exception.py:457
+#, python-format
+msgid ""
+"Unable to sign SAML assertion. It is likely that this server does not "
+"have xmlsec1 installed, or this is the result of misconfiguration. Reason"
+" %(reason)s"
+msgstr ""
+
+#: keystone/exception.py:465
+msgid ""
+"No Authorization headers found, cannot proceed with OAuth related calls, "
+"if running under HTTPd or Apache, ensure WSGIPassAuthorization is set to "
+"On."
+msgstr ""
+
+#: keystone/notifications.py:250
+#, python-format
+msgid "%(event)s is not a valid notification event, must be one of: %(actions)s"
+msgstr ""
+
+#: keystone/notifications.py:259
+#, python-format
+msgid "Method not callable: %s"
+msgstr ""
+
+#: keystone/assignment/controllers.py:107 keystone/identity/controllers.py:69
+#: keystone/resource/controllers.py:78
+msgid "Name field is required and cannot be empty"
+msgstr "Campo nome é requerido e não pode ser vazio"
+
+#: keystone/assignment/controllers.py:330
+#: keystone/assignment/controllers.py:753
+msgid "Specify a domain or project, not both"
+msgstr "Especifique um domínio ou projeto, não ambos"
+
+#: keystone/assignment/controllers.py:333
+msgid "Specify one of domain or project"
+msgstr ""
+
+#: keystone/assignment/controllers.py:338
+#: keystone/assignment/controllers.py:758
+msgid "Specify a user or group, not both"
+msgstr "Epecifique um usuário ou grupo, não ambos"
+
+#: keystone/assignment/controllers.py:341
+msgid "Specify one of user or group"
+msgstr ""
+
+#: keystone/assignment/controllers.py:742
+msgid "Combining effective and group filter will always result in an empty list."
+msgstr ""
+
+#: keystone/assignment/controllers.py:747
+msgid ""
+"Combining effective, domain and inherited filters will always result in "
+"an empty list."
+msgstr ""
+
+#: keystone/assignment/core.py:228
+msgid "Must specify either domain or project"
+msgstr ""
+
+#: keystone/assignment/core.py:493
+#, python-format
+msgid "Project (%s)"
+msgstr "Projeto (%s)"
+
+#: keystone/assignment/core.py:495
+#, python-format
+msgid "Domain (%s)"
+msgstr "Domínio (%s)"
+
+#: keystone/assignment/core.py:497
+msgid "Unknown Target"
+msgstr "Alvo Desconhecido"
+
+#: keystone/assignment/backends/ldap.py:92
+msgid "Domain metadata not supported by LDAP"
+msgstr ""
+
+#: keystone/assignment/backends/ldap.py:381
+#, python-format
+msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
+msgstr ""
+
+#: keystone/assignment/backends/ldap.py:387
+#, python-format
+msgid "Role %s not found"
+msgstr "Role %s não localizada"
+
+#: keystone/assignment/backends/ldap.py:402
+#: keystone/assignment/backends/sql.py:335
+#, python-format
+msgid "Cannot remove role that has not been granted, %s"
+msgstr "Não é possível remover role que não foi concedido, %s"
+
+#: keystone/assignment/backends/sql.py:356
+#, python-format
+msgid "Unexpected assignment type encountered, %s"
+msgstr ""
+
+#: keystone/assignment/role_backends/ldap.py:61 keystone/catalog/core.py:103
+#: keystone/common/ldap/core.py:1400 keystone/resource/backends/ldap.py:149
+#, python-format
+msgid "Duplicate ID, %s."
+msgstr "ID duplicado, %s."
+
+#: keystone/assignment/role_backends/ldap.py:69
+#: keystone/common/ldap/core.py:1390
+#, python-format
+msgid "Duplicate name, %s."
+msgstr "Nome duplicado, %s."
+
+#: keystone/assignment/role_backends/ldap.py:119
+#, python-format
+msgid "Cannot duplicate name %s"
+msgstr ""
+
+#: keystone/auth/controllers.py:60
+#, python-format
+msgid ""
+"Cannot load an auth-plugin by class-name without a \"method\" attribute "
+"defined: %s"
+msgstr ""
+
+#: keystone/auth/controllers.py:71
+#, python-format
+msgid ""
+"Auth plugin %(plugin)s is requesting previously registered method "
+"%(method)s"
+msgstr ""
+
+#: keystone/auth/controllers.py:115
+#, python-format
+msgid ""
+"Unable to reconcile identity attribute %(attribute)s as it has "
+"conflicting values %(new)s and %(old)s"
+msgstr ""
+
+#: keystone/auth/controllers.py:336
+msgid "Scoping to both domain and project is not allowed"
+msgstr "A definição de escopo para o domínio e o projeto não é permitida"
+
+#: keystone/auth/controllers.py:339
+msgid "Scoping to both domain and trust is not allowed"
+msgstr "A definição de escopo para o domínio e a trust não é permitida"
+
+#: keystone/auth/controllers.py:342
+msgid "Scoping to both project and trust is not allowed"
+msgstr "A definição de escopo para o projeto e a trust não é permitida"
+
+#: keystone/auth/controllers.py:512
+msgid "User not found"
+msgstr "Usuário não localizado"
+
+#: keystone/auth/controllers.py:616
+msgid "A project-scoped token is required to produce a service catalog."
+msgstr ""
+
+#: keystone/auth/plugins/external.py:46
+msgid "No authenticated user"
+msgstr "Nenhum usuário autenticado"
+
+#: keystone/auth/plugins/external.py:56
+#, python-format
+msgid "Unable to lookup user %s"
+msgstr "Não é possível consultar o usuário %s"
+
+#: keystone/auth/plugins/external.py:107
+msgid "auth_type is not Negotiate"
+msgstr ""
+
+#: keystone/auth/plugins/mapped.py:244
+msgid "Could not map user"
+msgstr ""
+
+#: keystone/auth/plugins/oauth1.py:39
+#, python-format
+msgid "%s not supported"
+msgstr ""
+
+#: keystone/auth/plugins/oauth1.py:57
+msgid "Access token is expired"
+msgstr "Token de acesso expirou"
+
+#: keystone/auth/plugins/oauth1.py:71
+msgid "Could not validate the access token"
+msgstr ""
+
+#: keystone/auth/plugins/password.py:46
+msgid "Invalid username or password"
+msgstr "Nome de usuário ou senha inválidos"
+
+#: keystone/auth/plugins/token.py:72 keystone/token/controllers.py:160
+msgid "rescope a scoped token"
+msgstr ""
+
+#: keystone/catalog/controllers.py:168
+#, python-format
+msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
+msgstr ""
+
+#: keystone/common/authorization.py:47 keystone/common/wsgi.py:64
+#, python-format
+msgid "token reference must be a KeystoneToken type, got: %s"
+msgstr ""
+
+#: keystone/common/base64utils.py:66
+msgid "pad must be single character"
+msgstr ""
+
+#: keystone/common/base64utils.py:215
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
+msgstr ""
+
+#: keystone/common/base64utils.py:219
+#, python-format
+msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgstr ""
+
+#: keystone/common/base64utils.py:225
+#, python-format
+msgid "text is not a multiple of 4, but contains pad \"%s\""
+msgstr ""
+
+#: keystone/common/base64utils.py:244 keystone/common/base64utils.py:265
+msgid "padded base64url text must be multiple of 4 characters"
+msgstr ""
+
+#: keystone/common/controller.py:237 keystone/token/providers/common.py:589
+msgid "Non-default domain is not supported"
+msgstr "O domínio não padrão não é suportado"
+
+#: keystone/common/controller.py:305 keystone/identity/core.py:428
+#: keystone/resource/core.py:761 keystone/resource/backends/ldap.py:61
+#, python-format
+msgid "Expected dict or list: %s"
+msgstr "Esperado dict ou list: %s"
+
+#: keystone/common/controller.py:318
+msgid "Marker could not be found"
+msgstr "Marcador não pôde ser encontrado"
+
+#: keystone/common/controller.py:329
+msgid "Invalid limit value"
+msgstr "Valor limite inválido"
+
+#: keystone/common/controller.py:637
+msgid "Cannot change Domain ID"
+msgstr ""
+
+#: keystone/common/controller.py:666
+msgid "domain_id is required as part of entity"
+msgstr ""
+
+#: keystone/common/controller.py:701
+msgid "A domain-scoped token must be used"
+msgstr ""
+
+#: keystone/common/dependency.py:68
+#, python-format
+msgid "Unregistered dependency: %(name)s for %(targets)s"
+msgstr ""
+
+#: keystone/common/dependency.py:108
+msgid "event_callbacks must be a dict"
+msgstr ""
+
+#: keystone/common/dependency.py:113
+#, python-format
+msgid "event_callbacks[%s] must be a dict"
+msgstr ""
+
+#: keystone/common/pemutils.py:223
+#, python-format
+msgid "unknown pem_type \"%(pem_type)s\", valid types are: %(valid_pem_types)s"
+msgstr ""
+
+#: keystone/common/pemutils.py:242
+#, python-format
+msgid ""
+"unknown pem header \"%(pem_header)s\", valid headers are: "
+"%(valid_pem_headers)s"
+msgstr ""
+
+#: keystone/common/pemutils.py:298
+#, python-format
+msgid "failed to find end matching \"%s\""
+msgstr ""
+
+#: keystone/common/pemutils.py:302
+#, python-format
+msgid ""
+"beginning & end PEM headers do not match (%(begin_pem_header)s!= "
+"%(end_pem_header)s)"
+msgstr ""
+
+#: keystone/common/pemutils.py:377
+#, python-format
+msgid "unknown pem_type: \"%s\""
+msgstr ""
+
+#: keystone/common/pemutils.py:389
+#, python-format
+msgid ""
+"failed to base64 decode %(pem_type)s PEM at position%(position)d: "
+"%(err_msg)s"
+msgstr ""
+
+#: keystone/common/utils.py:164 keystone/credential/controllers.py:44
+msgid "Invalid blob in credential"
+msgstr "BLOB inválido na credencial"
+
+#: keystone/common/wsgi.py:330
+#, python-format
+msgid "%s field is required and cannot be empty"
+msgstr ""
+
+#: keystone/common/wsgi.py:342
+#, python-format
+msgid "%s field(s) cannot be empty"
+msgstr ""
+
+#: keystone/common/wsgi.py:563
+msgid "The resource could not be found."
+msgstr "O recurso não pôde ser localizado."
+
+#: keystone/common/wsgi.py:704
+#, python-format
+msgid "Unexpected status requested for JSON Home response, %s"
+msgstr ""
+
+#: keystone/common/cache/_memcache_pool.py:113
+#, python-format
+msgid "Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
+msgstr ""
+
+#: keystone/common/cache/core.py:132
+msgid "region not type dogpile.cache.CacheRegion"
+msgstr "região não é do tipo dogpile.cache.CacheRegion"
+
+#: keystone/common/cache/backends/mongo.py:231
+msgid "db_hosts value is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:236
+msgid "database db_name is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:241
+msgid "cache_collection name is required"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:252
+msgid "integer value expected for w (write concern attribute)"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:260
+msgid "replicaset_name required when use_replica is True"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:275
+msgid "integer value expected for mongo_ttl_seconds"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:301
+msgid "no ssl support available"
+msgstr ""
+
+#: keystone/common/cache/backends/mongo.py:310
+#, python-format
+msgid ""
+"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\","
+" \"REQUIRED\""
+msgstr ""
+
+#: keystone/common/kvs/core.py:71
+#, python-format
+msgid "Lock Timeout occurred for key, %(target)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:106
+#, python-format
+msgid "KVS region %s is already configured. Cannot reconfigure."
+msgstr ""
+
+#: keystone/common/kvs/core.py:145
+#, python-format
+msgid "Key Value Store not configured: %s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:198
+msgid "`key_mangler` option must be a function reference"
+msgstr ""
+
+#: keystone/common/kvs/core.py:353
+#, python-format
+msgid "Lock key must match target key: %(lock)s != %(target)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:357
+msgid "Must be called within an active lock context."
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:69
+#, python-format
+msgid "Maximum lock attempts on %s occurred."
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:108
+#, python-format
+msgid ""
+"Backend `%(driver)s` is not a valid memcached backend. Valid drivers: "
+"%(driver_list)s"
+msgstr ""
+
+#: keystone/common/kvs/backends/memcached.py:178
+msgid "`key_mangler` functions must be callable."
+msgstr ""
+
+#: keystone/common/ldap/core.py:191
+#, python-format
+msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:201
+#, python-format
+msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
+msgstr ""
+"Opção de certificado LADP TLS inválida: %(option)s. Escolha uma de: "
+"%(options)s"
+
+#: keystone/common/ldap/core.py:213
+#, python-format
+msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
+msgstr "Escopo LDAP inválido: %(scope)s. Escolha um de: %(options)s"
+
+#: keystone/common/ldap/core.py:588
+msgid "Invalid TLS / LDAPS combination"
+msgstr "Combinação TLS / LADPS inválida"
+
+#: keystone/common/ldap/core.py:593
+#, python-format
+msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
+msgstr "Opção LDAP TLS_AVAIL inválida: %s. TLS não dsponível"
+
+#: keystone/common/ldap/core.py:603
+#, python-format
+msgid "tls_cacertfile %s not found or is not a file"
+msgstr "tls_cacertfile %s não encontrada ou não é um arquivo"
+
+#: keystone/common/ldap/core.py:615
+#, python-format
+msgid "tls_cacertdir %s not found or is not a directory"
+msgstr "tls_cacertdir %s não encontrado ou não é um diretório"
+
+#: keystone/common/ldap/core.py:1325
+#, python-format
+msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
+msgstr ""
+
+#: keystone/common/ldap/core.py:1369
+#, python-format
+msgid "LDAP %s create"
+msgstr "Criação de LDAP %s"
+
+#: keystone/common/ldap/core.py:1374
+#, python-format
+msgid "LDAP %s update"
+msgstr "Atualização de LDAP %s"
+
+#: keystone/common/ldap/core.py:1379
+#, python-format
+msgid "LDAP %s delete"
+msgstr "Exclusão de LDAP %s"
+
+#: keystone/common/ldap/core.py:1521
+msgid ""
+"Disabling an entity where the 'enable' attribute is ignored by "
+"configuration."
+msgstr ""
+
+#: keystone/common/ldap/core.py:1532
+#, python-format
+msgid "Cannot change %(option_name)s %(attr)s"
+msgstr "Não é possível alterar %(option_name)s %(attr)s"
+
+#: keystone/common/ldap/core.py:1619
+#, python-format
+msgid "Member %(member)s is already a member of group %(group)s"
+msgstr ""
+
+#: keystone/common/sql/core.py:219
+msgid ""
+"Cannot truncate a driver call without hints list as first parameter after"
+" self "
+msgstr ""
+
+#: keystone/common/sql/core.py:410
+msgid "Duplicate Entry"
+msgstr ""
+
+#: keystone/common/sql/core.py:426
+#, python-format
+msgid "An unexpected error occurred when trying to store %s"
+msgstr ""
+
+#: keystone/common/sql/migration_helpers.py:187
+#: keystone/common/sql/migration_helpers.py:245
+#, python-format
+msgid "%s extension does not exist."
+msgstr ""
+
+#: keystone/common/validation/validators.py:54
+#, python-format
+msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:318
+msgid "Token belongs to another user"
+msgstr "O token pertence à outro usuário"
+
+#: keystone/contrib/ec2/controllers.py:346
+msgid "Credential belongs to another user"
+msgstr "A credencial pertence à outro usuário"
+
+#: keystone/contrib/endpoint_filter/backends/sql.py:69
+#, python-format
+msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+msgstr "Endpoint %(endpoint_id)s não encontrado no projeto %(project_id)s"
+
+#: keystone/contrib/endpoint_filter/backends/sql.py:180
+msgid "Endpoint Group Project Association not found"
+msgstr ""
+
+#: keystone/contrib/endpoint_policy/core.py:258
+#, python-format
+msgid "No policy is associated with endpoint %(endpoint_id)s."
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:274
+msgid "Missing entity ID from environment"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:282
+msgid "Request must have an origin query parameter"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:292
+#, python-format
+msgid "%(host)s is not a trusted dashboard host"
+msgstr ""
+
+#: keystone/contrib/federation/controllers.py:333
+msgid "Use a project scoped token when attempting to create a SAML assertion"
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:454
+#, python-format
+msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:521
+msgid "Ensure configuration option idp_entity_id is set."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:524
+msgid "Ensure configuration option idp_sso_endpoint is set."
+msgstr ""
+
+#: keystone/contrib/federation/idp.py:544
+msgid ""
+"idp_contact_type must be one of: [technical, other, support, "
+"administrative or billing."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:178
+msgid "Federation token is expired"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:208
+msgid ""
+"Could not find Identity Provider identifier in environment, check "
+"[federation] remote_id_attribute for details."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:213
+msgid ""
+"Incoming identity provider identifier not included among the accepted "
+"identifiers."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:501
+#, python-format
+msgid "User type %s not supported"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:537
+#, python-format
+msgid ""
+"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords "
+"must be specified."
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:753
+#, python-format
+msgid "Identity Provider %(idp)s is disabled"
+msgstr ""
+
+#: keystone/contrib/federation/utils.py:761
+#, python-format
+msgid "Service Provider %(sp)s is disabled"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:99
+msgid "Cannot change consumer secret"
+msgstr "Não é possível alterar segredo do consumidor"
+
+#: keystone/contrib/oauth1/controllers.py:131
+msgid "Cannot list request tokens with a token issued via delegation."
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:192
+#: keystone/contrib/oauth1/backends/sql.py:270
+msgid "User IDs do not match"
+msgstr "ID de usuário não confere"
+
+#: keystone/contrib/oauth1/controllers.py:199
+msgid "Could not find role"
+msgstr "Não é possível encontrar role"
+
+#: keystone/contrib/oauth1/controllers.py:248
+msgid "Invalid signature"
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:299
+#: keystone/contrib/oauth1/controllers.py:377
+msgid "Request token is expired"
+msgstr "Token de requisição expirou"
+
+#: keystone/contrib/oauth1/controllers.py:313
+msgid "There should not be any non-oauth parameters"
+msgstr "Não deve haver nenhum parâmetro não oauth"
+
+#: keystone/contrib/oauth1/controllers.py:317
+msgid "provided consumer key does not match stored consumer key"
+msgstr ""
+"Chave de consumidor fornecida não confere com a chave de consumidor "
+"armazenada"
+
+#: keystone/contrib/oauth1/controllers.py:321
+msgid "provided verifier does not match stored verifier"
+msgstr "Verificador fornecido não confere com o verificador armazenado"
+
+#: keystone/contrib/oauth1/controllers.py:325
+msgid "provided request key does not match stored request key"
+msgstr ""
+"Chave de requisição do provedor não confere com a chave de requisição "
+"armazenada"
+
+#: keystone/contrib/oauth1/controllers.py:329
+msgid "Request Token does not have an authorizing user id"
+msgstr "Token de Requisição não possui um ID de usuário autorizado"
+
+#: keystone/contrib/oauth1/controllers.py:366
+msgid "Cannot authorize a request token with a token issued via delegation."
+msgstr ""
+
+#: keystone/contrib/oauth1/controllers.py:396
+msgid "authorizing user does not have role required"
+msgstr "Usuário autorizado não possui o role necessário"
+
+#: keystone/contrib/oauth1/controllers.py:409
+msgid "User is not a member of the requested project"
+msgstr "Usuário não é um membro do projeto requisitado"
+
+#: keystone/contrib/oauth1/backends/sql.py:91
+msgid "Consumer not found"
+msgstr "Consumidor não encontrado"
+
+#: keystone/contrib/oauth1/backends/sql.py:186
+msgid "Request token not found"
+msgstr "Token de requisição não encontrado"
+
+#: keystone/contrib/oauth1/backends/sql.py:250
+msgid "Access token not found"
+msgstr "Token de acesso não encontrado"
+
+#: keystone/contrib/revoke/controllers.py:33
+#, python-format
+msgid "invalid date format %s"
+msgstr ""
+
+#: keystone/contrib/revoke/core.py:150
+msgid ""
+"The revoke call must not have both domain_id and project_id. This is a "
+"bug in the Keystone server. The current request is aborted."
+msgstr ""
+
+#: keystone/contrib/revoke/core.py:218 keystone/token/provider.py:207
+#: keystone/token/provider.py:230 keystone/token/provider.py:296
+#: keystone/token/provider.py:303
+msgid "Failed to validate token"
+msgstr "Falha ao validar token"
+
+#: keystone/identity/controllers.py:72
+msgid "Enabled field must be a boolean"
+msgstr "Campo habilitado precisa ser um booleano"
+
+#: keystone/identity/controllers.py:98
+msgid "Enabled field should be a boolean"
+msgstr "Campo habilitado deve ser um booleano"
+
+#: keystone/identity/core.py:112
+#, python-format
+msgid "Database at /domains/%s/config"
+msgstr ""
+
+#: keystone/identity/core.py:287 keystone/identity/backends/ldap.py:59
+#: keystone/identity/backends/ldap.py:61 keystone/identity/backends/ldap.py:67
+#: keystone/identity/backends/ldap.py:69 keystone/identity/backends/sql.py:104
+#: keystone/identity/backends/sql.py:106
+msgid "Invalid user / password"
+msgstr ""
+
+#: keystone/identity/core.py:693
+#, python-format
+msgid "User is disabled: %s"
+msgstr "O usuário está desativado: %s"
+
+#: keystone/identity/core.py:735
+msgid "Cannot change user ID"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:99
+msgid "Cannot change user name"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:188 keystone/identity/backends/sql.py:188
+#: keystone/identity/backends/sql.py:206
+#, python-format
+msgid "User '%(user_id)s' not found in group '%(group_id)s'"
+msgstr ""
+
+#: keystone/identity/backends/ldap.py:339
+#, python-format
+msgid "User %(user_id)s is already a member of group %(group_id)s"
+msgstr "Usuário %(user_id)s já é membro do grupo %(group_id)s"
+
+#: keystone/models/token_model.py:61
+msgid "Found invalid token: scoped to both project and domain."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:108
+#, python-format
+msgid ""
+"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
+"may be removed in %(remove_in)s."
+msgstr ""
+"%(what)s está deprecado desde %(as_of)s em favor de %(in_favor_of)s e "
+"pode ser removido em %(remove_in)s."
+
+#: keystone/openstack/common/versionutils.py:112
+#, python-format
+msgid ""
+"%(what)s is deprecated as of %(as_of)s and may be removed in "
+"%(remove_in)s. It will not be superseded."
+msgstr ""
+"%(what)s está deprecado desde %(as_of)s e pode ser removido em "
+"%(remove_in)s. Ele não será substituído."
+
+#: keystone/openstack/common/versionutils.py:116
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:119
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
+msgstr ""
+
+#: keystone/openstack/common/versionutils.py:241
+#, python-format
+msgid "Deprecated: %s"
+msgstr "Deprecado: %s"
+
+#: keystone/openstack/common/versionutils.py:259
+#, python-format
+msgid "Fatal call to deprecated config: %(msg)s"
+msgstr "Chamada fatal à configuração deprecada: %(msg)s"
+
+#: keystone/resource/controllers.py:231
+msgid ""
+"Cannot use parents_as_list and parents_as_ids query params at the same "
+"time."
+msgstr ""
+
+#: keystone/resource/controllers.py:237
+msgid ""
+"Cannot use subtree_as_list and subtree_as_ids query params at the same "
+"time."
+msgstr ""
+
+#: keystone/resource/core.py:80
+#, python-format
+msgid "max hierarchy depth reached for %s branch."
+msgstr ""
+
+#: keystone/resource/core.py:97
+msgid "cannot create a project within a different domain than its parents."
+msgstr ""
+
+#: keystone/resource/core.py:101
+#, python-format
+msgid "cannot create a project in a branch containing a disabled project: %s"
+msgstr ""
+
+#: keystone/resource/core.py:123
+#, python-format
+msgid "Domain is disabled: %s"
+msgstr "O domínio está desativado: %s"
+
+#: keystone/resource/core.py:141
+#, python-format
+msgid "Domain cannot be named %s"
+msgstr ""
+
+#: keystone/resource/core.py:144
+#, python-format
+msgid "Domain cannot have ID %s"
+msgstr ""
+
+#: keystone/resource/core.py:156
+#, python-format
+msgid "Project is disabled: %s"
+msgstr "O projeto está desativado: %s"
+
+#: keystone/resource/core.py:176
+#, python-format
+msgid "cannot enable project %s since it has disabled parents"
+msgstr ""
+
+#: keystone/resource/core.py:184
+#, python-format
+msgid "cannot disable project %s since its subtree contains enabled projects"
+msgstr ""
+
+#: keystone/resource/core.py:195
+msgid "Update of `parent_id` is not allowed."
+msgstr ""
+
+#: keystone/resource/core.py:222
+#, python-format
+msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
+msgstr ""
+
+#: keystone/resource/core.py:376
+msgid "Multiple domains are not supported"
+msgstr ""
+
+#: keystone/resource/core.py:429
+msgid "delete the default domain"
+msgstr ""
+
+#: keystone/resource/core.py:440
+msgid "cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+
+#: keystone/resource/core.py:841
+msgid "No options specified"
+msgstr "Nenhuma opção especificada"
+
+#: keystone/resource/core.py:847
+#, python-format
+msgid ""
+"The value of group %(group)s specified in the config should be a "
+"dictionary of options"
+msgstr ""
+
+#: keystone/resource/core.py:871
+#, python-format
+msgid ""
+"Option %(option)s found with no group specified while checking domain "
+"configuration request"
+msgstr ""
+
+#: keystone/resource/core.py:878
+#, python-format
+msgid "Group %(group)s is not supported for domain specific configurations"
+msgstr ""
+
+#: keystone/resource/core.py:885
+#, python-format
+msgid ""
+"Option %(option)s in group %(group)s is not supported for domain specific"
+" configurations"
+msgstr ""
+
+#: keystone/resource/core.py:938
+msgid "An unexpected error occurred when retrieving domain configs"
+msgstr ""
+
+#: keystone/resource/core.py:1013 keystone/resource/core.py:1097
+#: keystone/resource/core.py:1167 keystone/resource/config_backends/sql.py:70
+#, python-format
+msgid "option %(option)s in group %(group)s"
+msgstr ""
+
+#: keystone/resource/core.py:1016 keystone/resource/core.py:1102
+#: keystone/resource/core.py:1163
+#, python-format
+msgid "group %(group)s"
+msgstr ""
+
+#: keystone/resource/core.py:1018
+msgid "any options"
+msgstr ""
+
+#: keystone/resource/core.py:1062
+#, python-format
+msgid ""
+"Trying to update option %(option)s in group %(group)s, so that, and only "
+"that, option must be specified  in the config"
+msgstr ""
+
+#: keystone/resource/core.py:1067
+#, python-format
+msgid ""
+"Trying to update group %(group)s, so that, and only that, group must be "
+"specified in the config"
+msgstr ""
+
+#: keystone/resource/core.py:1076
+#, python-format
+msgid ""
+"request to update group %(group)s, but config provided contains group "
+"%(group_other)s instead"
+msgstr ""
+
+#: keystone/resource/core.py:1083
+#, python-format
+msgid ""
+"Trying to update option %(option)s in group %(group)s, but config "
+"provided contains option %(option_other)s instead"
+msgstr ""
+
+#: keystone/resource/backends/ldap.py:151
+#: keystone/resource/backends/ldap.py:159
+#: keystone/resource/backends/ldap.py:163
+msgid "Domains are read-only against LDAP"
+msgstr ""
+
+#: keystone/server/eventlet.py:77
+msgid ""
+"Running keystone via eventlet is deprecated as of Kilo in favor of "
+"running in a WSGI server (e.g. mod_wsgi). Support for keystone under "
+"eventlet will be removed in the \"M\"-Release."
+msgstr ""
+
+#: keystone/server/eventlet.py:90
+#, python-format
+msgid "Failed to start the %(name)s server"
+msgstr ""
+
+#: keystone/token/controllers.py:391
+#, python-format
+msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
+msgstr "Usuário %(u_id)s  não está autorizado para o tenant %(t_id)s"
+
+#: keystone/token/controllers.py:410 keystone/token/controllers.py:413
+msgid "Token does not belong to specified tenant."
+msgstr "O token não pertence ao tenant especificado."
+
+#: keystone/token/persistence/backends/kvs.py:133
+#, python-format
+msgid "Unknown token version %s"
+msgstr ""
+
+#: keystone/token/providers/common.py:250
+#: keystone/token/providers/common.py:355
+#, python-format
+msgid "User %(user_id)s has no access to project %(project_id)s"
+msgstr "O usuário %(user_id)s não tem acesso ao projeto %(project_id)s"
+
+#: keystone/token/providers/common.py:255
+#: keystone/token/providers/common.py:360
+#, python-format
+msgid "User %(user_id)s has no access to domain %(domain_id)s"
+msgstr "O usuário %(user_id)s não tem acesso ao domínio %(domain_id)s"
+
+#: keystone/token/providers/common.py:282
+msgid "Trustor is disabled."
+msgstr "O fiador está desativado."
+
+#: keystone/token/providers/common.py:346
+msgid "Trustee has no delegated roles."
+msgstr "Fiador não possui roles delegados."
+
+#: keystone/token/providers/common.py:407
+#, python-format
+msgid "Invalid audit info data type: %(data)s (%(type)s)"
+msgstr ""
+
+#: keystone/token/providers/common.py:435
+msgid "User is not a trustee."
+msgstr "Usuário não é confiável."
+
+#: keystone/token/providers/common.py:579
+msgid ""
+"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
+"Authentication"
+msgstr ""
+
+#: keystone/token/providers/common.py:597
+msgid "Domain scoped token is not supported"
+msgstr "O token de escopo de domínio não é suportado"
+
+#: keystone/token/providers/pki.py:48 keystone/token/providers/pkiz.py:30
+msgid "Unable to sign token."
+msgstr "Não é possível assinar o token."
+
+#: keystone/token/providers/fernet/core.py:215
+msgid ""
+"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
+"tokens."
+msgstr ""
+
+#: keystone/token/providers/fernet/token_formatters.py:189
+#, python-format
+msgid "This is not a recognized Fernet payload version: %s"
+msgstr ""
+
+#: keystone/trust/controllers.py:148
+msgid "Redelegation allowed for delegated by trust only"
+msgstr ""
+
+#: keystone/trust/controllers.py:181
+msgid "The authenticated user should match the trustor."
+msgstr ""
+
+#: keystone/trust/controllers.py:186
+msgid "At least one role should be specified."
+msgstr ""
+
+#: keystone/trust/core.py:57
+#, python-format
+msgid ""
+"Remaining redelegation depth of %(redelegation_depth)d out of allowed "
+"range of [0..%(max_count)d]"
+msgstr ""
+
+#: keystone/trust/core.py:66
+#, python-format
+msgid ""
+"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
+"order to redelegate a trust"
+msgstr ""
+
+#: keystone/trust/core.py:77
+msgid "Requested expiration time is more than redelegated trust can provide"
+msgstr ""
+
+#: keystone/trust/core.py:87
+msgid "Some of requested roles are not in redelegated trust"
+msgstr ""
+
+#: keystone/trust/core.py:116
+msgid "One of the trust agents is disabled or deleted"
+msgstr ""
+
+#: keystone/trust/core.py:135
+msgid "remaining_uses must be a positive integer or null."
+msgstr ""
+
+#: keystone/trust/core.py:141
+#, python-format
+msgid ""
+"Requested redelegation depth of %(requested_count)d is greater than "
+"allowed %(max_count)d"
+msgstr ""
+
+#: keystone/trust/core.py:147
+msgid "remaining_uses must not be set if redelegation is allowed"
+msgstr ""
+
+#: keystone/trust/core.py:157
+msgid ""
+"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting"
+" this parameter is advised."
+msgstr ""
+
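The trust messages above describe keystone's redelegation-depth bookkeeping: every redelegated trust must leave a remaining depth inside [0..max_count]. A minimal standalone sketch of that check (function name, signature, and error text are illustrative; this is not the actual keystone/trust/core.py code):

    def check_redelegation_depth(requested_count, max_count):
        # Reject depths outside the allowed range, as in the message
        # "Requested redelegation depth of ... greater than allowed ...".
        if not 0 <= requested_count <= max_count:
            raise ValueError(
                "Requested redelegation depth of %d is greater than "
                "allowed %d" % (requested_count, max_count))
        # Each hop consumes one level of depth for the next trust.
        return requested_count - 1

    print(check_redelegation_depth(3, 3))  # -> 2 levels left downstream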
diff --git a/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..f8d060b
--- /dev/null
@@ -0,0 +1,26 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Russian (http://www.transifex.com/projects/p/keystone/"
+"language/ru/)\n"
+"Language: ru\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
+"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "Не удается открыть файл шаблона %s"
diff --git a/keystone-moon/keystone/locale/vi_VN/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/vi_VN/LC_MESSAGES/keystone-log-info.po
new file mode 100644 (file)
index 0000000..bcb9ab4
--- /dev/null
@@ -0,0 +1,211 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Vietnamese (Viet Nam) (http://www.transifex.com/projects/p/"
+"keystone/language/vi_VN/)\n"
+"Language: vi_VN\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: keystone/assignment/core.py:250
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr ""
+
+#: keystone/assignment/core.py:258
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr ""
+
+#: keystone/auth/controllers.py:64
+msgid "Loading auth-plugins by class-name is deprecated."
+msgstr ""
+
+#: keystone/auth/controllers.py:106
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s.  Will use "
+"the earliest value."
+msgstr ""
+
+#: keystone/common/openssl.py:81
+#, python-format
+msgid "Running command - %s"
+msgstr ""
+
+#: keystone/common/wsgi.py:79
+msgid "No bind information present in token"
+msgstr ""
+
+#: keystone/common/wsgi.py:83
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr ""
+
+#: keystone/common/wsgi.py:90
+msgid "Kerberos credentials required and not present"
+msgstr ""
+
+#: keystone/common/wsgi.py:94
+msgid "Kerberos credentials do not match those in bind"
+msgstr ""
+
+#: keystone/common/wsgi.py:98
+msgid "Kerberos bind authentication successful"
+msgstr ""
+
+#: keystone/common/wsgi.py:105
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:103
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr ""
+
+#: keystone/common/kvs/core.py:138
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr ""
+
+#: keystone/common/kvs/core.py:188
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:200
+#, python-format
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgstr ""
+
+#: keystone/common/kvs/core.py:210
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr ""
+
+#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
+#, python-format
+msgid ""
+"Received the following notification: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+msgstr ""
+
+#: keystone/openstack/common/eventlet_backdoor.py:146
+#, python-format
+msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
+msgstr "Eventlet backdoor lắng nghe trên %(port)s đối với tiến trình %(pid)d"
+
+#: keystone/openstack/common/service.py:173
+#, python-format
+msgid "Caught %s, exiting"
+msgstr "Bắt %s, thoát"
+
+#: keystone/openstack/common/service.py:231
+msgid "Parent process has died unexpectedly, exiting"
+msgstr "Tiến trình cha bị chết đột ngột, thoát"
+
+#: keystone/openstack/common/service.py:262
+#, python-format
+msgid "Child caught %s, exiting"
+msgstr "Tiến trình con bắt %s, thoát"
+
+#: keystone/openstack/common/service.py:301
+msgid "Forking too fast, sleeping"
+msgstr "Tạo tiến trình con quá nhanh, nghỉ"
+
+#: keystone/openstack/common/service.py:320
+#, python-format
+msgid "Started child %d"
+msgstr "Tiến trình con đã được khởi động %d "
+
+#: keystone/openstack/common/service.py:330
+#, python-format
+msgid "Starting %d workers"
+msgstr "Khởi động %d động cơ"
+
+#: keystone/openstack/common/service.py:347
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr "Tiến trình con %(pid)d bị huỷ bởi tín hiệu %(sig)d"
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "Child %(pid)s exited with status %(code)d"
+msgstr "Tiến trình con %(pid)s đã thiaast với trạng thái %(code)d"
+
+#: keystone/openstack/common/service.py:390
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr "Bắt %s, đang dừng tiến trình con"
+
+#: keystone/openstack/common/service.py:399
+msgid "Wait called after thread killed. Cleaning up."
+msgstr ""
+
+#: keystone/openstack/common/service.py:415
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr "Chờ đợi %d tiến trình con để thoát "
+
+#: keystone/token/persistence/backends/sql.py:279
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:72
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:130
+#, python-format
+msgid "Created a new key: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:143
+msgid "Key repository is already initialized; aborting."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:179
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:185
+#, python-format
+msgid "Current primary key is: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:187
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:197
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:213
+#, python-format
+msgid "Excess keys to purge: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:237
+#, python-format
+msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..a3a728e
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/keystone/"
+"language/zh_CN/)\n"
+"Language: zh_CN\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "无法打开模板文件 %s"
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po
new file mode 100644 (file)
index 0000000..a48b938
--- /dev/null
@@ -0,0 +1,177 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Xiao Xi LIU <liuxx@cn.ibm.com>, 2014
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 04:31+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/keystone/"
+"language/zh_CN/)\n"
+"Language: zh_CN\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: keystone/notifications.py:304
+msgid "Failed to construct notifier"
+msgstr ""
+
+#: keystone/notifications.py:389
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/notifications.py:606
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr ""
+
+#: keystone/catalog/core.py:62
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr ""
+
+#: keystone/catalog/core.py:66
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "端点 %(url)s 的格式不正确 - 键 %(keyerror)s 未知"
+
+#: keystone/catalog/core.py:71
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+"端点 '%(url)s' 的格式不正确。在字符串替换时发生以下类型错误:%(typeerror)s"
+
+#: keystone/catalog/core.py:77
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr "端点 %s 的格式不完整 - (是否缺少了类型通告者?)"
+
+#: keystone/common/openssl.py:93
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgstr "命令 %(to_exec)s 已退出,退出码及输出为 %(retcode)s- %(output)s"
+
+#: keystone/common/openssl.py:121
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr "无法删除文件%(file_path)r: %(error)s"
+
+#: keystone/common/utils.py:239
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+"设置调试环境出错。请确保选项--debug-url 的格式是这样的<host>:<port> ,和确保"
+"有一个调试进程正在监听那个端口"
+
+#: keystone/common/cache/core.py:100
+#, python-format
+msgid ""
+"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
+"Skipping unknown format: %s"
+msgstr ""
+
+#: keystone/common/environment/eventlet_server.py:99
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "无法绑定至 %(host)s:%(port)s"
+
+#: keystone/common/environment/eventlet_server.py:185
+msgid "Server error"
+msgstr "服务器报错"
+
+#: keystone/contrib/endpoint_policy/core.py:129
+#: keystone/contrib/endpoint_policy/core.py:228
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr "在域树- %(region_id)s 中发现循环引用或重复项。"
+
+#: keystone/contrib/federation/idp.py:410
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s"
+msgstr "对断言进行签名时出错,原因:%(reason)s"
+
+#: keystone/contrib/oauth1/core.py:136
+msgid "Cannot retrieve Authorization headers"
+msgstr ""
+
+#: keystone/openstack/common/loopingcall.py:95
+msgid "in fixed duration looping call"
+msgstr "在固定时段内循环调用"
+
+#: keystone/openstack/common/loopingcall.py:138
+msgid "in dynamic looping call"
+msgstr "在动态循环调用中"
+
+#: keystone/openstack/common/service.py:268
+msgid "Unhandled exception"
+msgstr "存在未处理的异常"
+
+#: keystone/resource/core.py:477
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/resource/core.py:939
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/resource/backends/sql.py:102 keystone/resource/backends/sql.py:121
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+
+#: keystone/token/provider.py:292
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr ""
+
+#: keystone/token/persistence/backends/kvs.py:226
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend.  Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+
+#: keystone/token/providers/common.py:611
+msgid "Failed to validate token"
+msgstr "token验证失败"
+
+#: keystone/token/providers/pki.py:47
+msgid "Unable to sign token"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:38
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:79
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-info.po
new file mode 100644 (file)
index 0000000..0e848ee
--- /dev/null
@@ -0,0 +1,215 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Xiao Xi LIU <liuxx@cn.ibm.com>, 2014
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2015-03-09 06:03+0000\n"
+"PO-Revision-Date: 2015-03-07 08:47+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/keystone/"
+"language/zh_CN/)\n"
+"Language: zh_CN\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: keystone/assignment/core.py:250
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr "正在创建默认角色%s,因为它之前不存在。"
+
+#: keystone/assignment/core.py:258
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr ""
+
+#: keystone/auth/controllers.py:64
+msgid "Loading auth-plugins by class-name is deprecated."
+msgstr "通过class-name(类名)加载auth-plugins(认证插件)的方式已被弃用。"
+
+#: keystone/auth/controllers.py:106
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s.  Will use "
+"the earliest value."
+msgstr ""
+"\"expires_at\" 被赋予矛盾的值: %(existing)s 和 %(new)s。将采用时间上较早的那"
+"个值。"
+
+#: keystone/common/openssl.py:81
+#, python-format
+msgid "Running command - %s"
+msgstr "正在运行命令 - %s"
+
+#: keystone/common/wsgi.py:79
+msgid "No bind information present in token"
+msgstr "令牌中暂无绑定信息"
+
+#: keystone/common/wsgi.py:83
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr "在绑定信息中没有命名绑定模式%s"
+
+#: keystone/common/wsgi.py:90
+msgid "Kerberos credentials required and not present"
+msgstr "没有所需的Kerberos凭证"
+
+#: keystone/common/wsgi.py:94
+msgid "Kerberos credentials do not match those in bind"
+msgstr "在绑定中没有匹配的Kerberos凭证"
+
+#: keystone/common/wsgi.py:98
+msgid "Kerberos bind authentication successful"
+msgstr "Kerberos绑定认证成功"
+
+#: keystone/common/wsgi.py:105
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr "不能验证未知绑定: {%(bind_type)s: %(identifier)s}"
+
+#: keystone/common/environment/eventlet_server.py:103
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr "正在 %(host)s:%(port)s 上启动 %(arg0)s"
+
+#: keystone/common/kvs/core.py:138
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr "正在将代理'%(proxy)s'加入KVS %(name)s 中。"
+
+#: keystone/common/kvs/core.py:188
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr "使用 %(func)s 作为KVS域 %(name)s 的key_mangler处理函数"
+
+#: keystone/common/kvs/core.py:200
+#, python-format
+msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgstr ""
+"使用默认的dogpile sha1_mangle_key函数作为KVS域 %s 的key_mangler处理函数"
+
+#: keystone/common/kvs/core.py:210
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr "KVS域 %s 的key_mangler处理函数被禁用。"
+
+#: keystone/contrib/example/core.py:64 keystone/contrib/example/core.py:73
+#, python-format
+msgid ""
+"Received the following notification: service %(service)s, resource_type: "
+"%(resource_type)s, operation %(operation)s payload %(payload)s"
+msgstr ""
+
+#: keystone/openstack/common/eventlet_backdoor.py:146
+#, python-format
+msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
+msgstr "携程为进程 %(pid)d 在后台监听 %(port)s "
+
+#: keystone/openstack/common/service.py:173
+#, python-format
+msgid "Caught %s, exiting"
+msgstr "捕获到 %s,正在退出"
+
+#: keystone/openstack/common/service.py:231
+msgid "Parent process has died unexpectedly, exiting"
+msgstr "父进程已意外终止,正在退出"
+
+#: keystone/openstack/common/service.py:262
+#, python-format
+msgid "Child caught %s, exiting"
+msgstr "子代捕获 %s,正在退出"
+
+#: keystone/openstack/common/service.py:301
+msgid "Forking too fast, sleeping"
+msgstr "派生速度太快,正在休眠"
+
+#: keystone/openstack/common/service.py:320
+#, python-format
+msgid "Started child %d"
+msgstr "已启动子代 %d"
+
+#: keystone/openstack/common/service.py:330
+#, python-format
+msgid "Starting %d workers"
+msgstr "正在启动 %d 工作程序"
+
+#: keystone/openstack/common/service.py:347
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr "信号 %(sig)d 已终止子代 %(pid)d"
+
+#: keystone/openstack/common/service.py:351
+#, python-format
+msgid "Child %(pid)s exited with status %(code)d"
+msgstr "子代 %(pid)s 已退出,状态为 %(code)d"
+
+#: keystone/openstack/common/service.py:390
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr "捕获到 %s,正在停止子代"
+
+#: keystone/openstack/common/service.py:399
+msgid "Wait called after thread killed. Cleaning up."
+msgstr "线程结束,正在清理"
+
+#: keystone/openstack/common/service.py:415
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr "正在等待 %d 个子代退出"
+
+#: keystone/token/persistence/backends/sql.py:279
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr "被移除的失效令牌总数:%d"
+
+#: keystone/token/providers/fernet/utils.py:72
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:130
+#, python-format
+msgid "Created a new key: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:143
+msgid "Key repository is already initialized; aborting."
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:179
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:185
+#, python-format
+msgid "Current primary key is: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:187
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:197
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:213
+#, python-format
+msgid "Excess keys to purge: %s"
+msgstr ""
+
+#: keystone/token/providers/fernet/utils.py:237
+#, python-format
+msgid "Loaded %(count)s encryption keys from: %(dir)s"
+msgstr ""
diff --git a/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po
new file mode 100644 (file)
index 0000000..b0ff57c
--- /dev/null
@@ -0,0 +1,25 @@
+# Translations template for keystone.
+# Copyright (C) 2014 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: Keystone\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
+"POT-Creation-Date: 2014-09-07 06:06+0000\n"
+"PO-Revision-Date: 2014-08-31 15:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/"
+"keystone/language/zh_TW/)\n"
+"Language: zh_TW\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: keystone/catalog/backends/templated.py:106
+#, python-format
+msgid "Unable to open template file %s"
+msgstr "無法開啟範本檔 %s"
diff --git a/keystone-moon/keystone/middleware/__init__.py b/keystone-moon/keystone/middleware/__init__.py
new file mode 100644 (file)
index 0000000..efbaa7c
--- /dev/null
@@ -0,0 +1,15 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.middleware.core import *  # noqa
diff --git a/keystone-moon/keystone/middleware/core.py b/keystone-moon/keystone/middleware/core.py
new file mode 100644 (file)
index 0000000..bf86cd2
--- /dev/null
@@ -0,0 +1,240 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_middleware import sizelimit
+from oslo_serialization import jsonutils
+import six
+
+from keystone.common import authorization
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _LW
+from keystone.models import token_model
+from keystone.openstack.common import versionutils
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+# Header used to transmit the auth token
+AUTH_TOKEN_HEADER = 'X-Auth-Token'
+
+
+# Header used to transmit the subject token
+SUBJECT_TOKEN_HEADER = 'X-Subject-Token'
+
+
+# Environment variable used to pass the request context
+CONTEXT_ENV = wsgi.CONTEXT_ENV
+
+
+# Environment variable used to pass the request params
+PARAMS_ENV = wsgi.PARAMS_ENV
+
+
+class TokenAuthMiddleware(wsgi.Middleware):
+    def process_request(self, request):
+        token = request.headers.get(AUTH_TOKEN_HEADER)
+        context = request.environ.get(CONTEXT_ENV, {})
+        context['token_id'] = token
+        if SUBJECT_TOKEN_HEADER in request.headers:
+            context['subject_token_id'] = (
+                request.headers.get(SUBJECT_TOKEN_HEADER))
+        request.environ[CONTEXT_ENV] = context
+
+
+class AdminTokenAuthMiddleware(wsgi.Middleware):
+    """A trivial filter that checks for a pre-defined admin token.
+
+    Sets 'is_admin' to true in the context, expected to be checked by
+    methods that are admin-only.
+
+    """
+
+    def process_request(self, request):
+        token = request.headers.get(AUTH_TOKEN_HEADER)
+        context = request.environ.get(CONTEXT_ENV, {})
+        context['is_admin'] = (token == CONF.admin_token)
+        request.environ[CONTEXT_ENV] = context
+
+
+class PostParamsMiddleware(wsgi.Middleware):
+    """Middleware to allow method arguments to be passed as POST parameters.
+
+    Filters out the parameters `self`, `context` and anything beginning with
+    an underscore.
+
+    """
+
+    def process_request(self, request):
+        params_parsed = request.params
+        params = {}
+        for k, v in six.iteritems(params_parsed):
+            if k in ('self', 'context'):
+                continue
+            if k.startswith('_'):
+                continue
+            params[k] = v
+
+        request.environ[PARAMS_ENV] = params
+
+
+class JsonBodyMiddleware(wsgi.Middleware):
+    """Middleware to allow method arguments to be passed as serialized JSON.
+
+    Accepting arguments as JSON is useful for accepting data that may be more
+    complex than simple primitives.
+
+    Filters out the parameters `self`, `context` and anything beginning with
+    an underscore.
+
+    """
+    def process_request(self, request):
+        # Abort early if we don't have any work to do
+        params_json = request.body
+        if not params_json:
+            return
+
+        # Reject unrecognized content types. Empty string indicates
+        # the client did not explicitly set the header
+        if request.content_type not in ('application/json', ''):
+            e = exception.ValidationError(attribute='application/json',
+                                          target='Content-Type header')
+            return wsgi.render_exception(e, request=request)
+
+        params_parsed = {}
+        try:
+            params_parsed = jsonutils.loads(params_json)
+        except ValueError:
+            e = exception.ValidationError(attribute='valid JSON',
+                                          target='request body')
+            return wsgi.render_exception(e, request=request)
+        finally:
+            if not params_parsed:
+                params_parsed = {}
+
+        if not isinstance(params_parsed, dict):
+            e = exception.ValidationError(attribute='valid JSON object',
+                                          target='request body')
+            return wsgi.render_exception(e, request=request)
+
+        params = {}
+        for k, v in six.iteritems(params_parsed):
+            if k in ('self', 'context'):
+                continue
+            if k.startswith('_'):
+                continue
+            params[k] = v
+
+        request.environ[PARAMS_ENV] = params
+
+
+class XmlBodyMiddleware(wsgi.Middleware):
+    """De/serialize XML to/from JSON."""
+
+    def print_warning(self):
+        LOG.warning(_LW('XML support has been removed as of the Kilo release '
+                        'and should not be referenced or used in deployment. '
+                        'Please remove references to XmlBodyMiddleware from '
+                        'your configuration. This compatibility stub will be '
+                        'removed in the L release'))
+
+    def __init__(self, *args, **kwargs):
+        super(XmlBodyMiddleware, self).__init__(*args, **kwargs)
+        self.print_warning()
+
+
+class XmlBodyMiddlewareV2(XmlBodyMiddleware):
+    """De/serialize XML to/from JSON for v2.0 API."""
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+
+class XmlBodyMiddlewareV3(XmlBodyMiddleware):
+    """De/serialize XML to/from JSON for v3 API."""
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+
+class NormalizingFilter(wsgi.Middleware):
+    """Middleware filter to handle URL normalization."""
+
+    def process_request(self, request):
+        """Normalizes URLs."""
+        # Removes a trailing slash from the given path, if any.
+        if (len(request.environ['PATH_INFO']) > 1 and
+                request.environ['PATH_INFO'][-1] == '/'):
+            request.environ['PATH_INFO'] = request.environ['PATH_INFO'][:-1]
+        # Rewrites path to root if no path is given.
+        elif not request.environ['PATH_INFO']:
+            request.environ['PATH_INFO'] = '/'
+
+
+class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter):
+    @versionutils.deprecated(
+        versionutils.deprecated.KILO,
+        in_favor_of='oslo_middleware.sizelimit.RequestBodySizeLimiter',
+        remove_in=+1,
+        what='keystone.middleware.RequestBodySizeLimiter')
+    def __init__(self, *args, **kwargs):
+        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
+
+
+class AuthContextMiddleware(wsgi.Middleware):
+    """Build the authentication context from the request auth token."""
+
+    def _build_auth_context(self, request):
+        token_id = request.headers.get(AUTH_TOKEN_HEADER).strip()
+
+        if token_id == CONF.admin_token:
+            # NOTE(gyee): no need to proceed any further as the special admin
+            # token is being handled by AdminTokenAuthMiddleware. This code
+            # will not be impacted even if AdminTokenAuthMiddleware is removed
+            # from the pipeline as "is_admin" is default to "False". This code
+            # is independent of AdminTokenAuthMiddleware.
+            return {}
+
+        context = {'token_id': token_id}
+        context['environment'] = request.environ
+
+        try:
+            token_ref = token_model.KeystoneToken(
+                token_id=token_id,
+                token_data=self.token_provider_api.validate_token(token_id))
+            # TODO(gyee): validate_token_bind should really be its own
+            # middleware
+            wsgi.validate_token_bind(context, token_ref)
+            return authorization.token_to_auth_context(token_ref)
+        except exception.TokenNotFound:
+            LOG.warning(_LW('RBAC: Invalid token'))
+            raise exception.Unauthorized()
+
+    def process_request(self, request):
+        if AUTH_TOKEN_HEADER not in request.headers:
+            LOG.debug(('Auth token not in the request header. '
+                       'Will not build auth context.'))
+            return
+
+        if authorization.AUTH_CONTEXT_ENV in request.environ:
+            msg = _LW('Auth context already exists in the request environment')
+            LOG.warning(msg)
+            return
+
+        auth_context = self._build_auth_context(request)
+        LOG.debug('RBAC: auth_context: %s', auth_context)
+        request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context
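TokenAuthMiddleware above simply copies the X-Auth-Token header (and X-Subject-Token, when present) into the request context. A self-contained sketch of that behavior, with a fake request standing in for keystone's webob-based one and assuming 'openstack.context' is the key wsgi.CONTEXT_ENV names:

    CONTEXT_ENV = 'openstack.context'  # assumed value of wsgi.CONTEXT_ENV

    class FakeRequest(object):
        # Stand-in for the request object keystone.common.wsgi provides.
        def __init__(self, headers):
            self.headers = headers
            self.environ = {}

    def process_request(request):
        # Same steps as TokenAuthMiddleware.process_request above.
        context = request.environ.get(CONTEXT_ENV, {})
        context['token_id'] = request.headers.get('X-Auth-Token')
        if 'X-Subject-Token' in request.headers:
            context['subject_token_id'] = request.headers['X-Subject-Token']
        request.environ[CONTEXT_ENV] = context

    req = FakeRequest({'X-Auth-Token': 'abc123', 'X-Subject-Token': 'def456'})
    process_request(req)
    print(req.environ[CONTEXT_ENV])  # both token ids now in the context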
diff --git a/keystone-moon/keystone/middleware/ec2_token.py b/keystone-moon/keystone/middleware/ec2_token.py
new file mode 100644 (file)
index 0000000..771b74f
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Starting point for routing EC2 requests.
+
+The EC2 Token Middleware has been deprecated as of Juno. It has been moved into
+keystonemiddleware, `keystonemiddleware.ec2_token`.
+
+"""
+
+from keystonemiddleware import ec2_token
+
+from keystone.openstack.common import versionutils
+
+
+class EC2Token(ec2_token.EC2Token):
+
+    @versionutils.deprecated(
+        versionutils.deprecated.JUNO,
+        in_favor_of='keystonemiddleware.ec2_token.EC2Token',
+        remove_in=+2,
+        what='keystone.middleware.ec2_token.EC2Token')
+    def __init__(self, *args, **kwargs):
+        super(EC2Token, self).__init__(*args, **kwargs)
+
+
+filter_factory = ec2_token.filter_factory
+app_factory = ec2_token.app_factory
+keystone_ec2_opts = ec2_token.keystone_ec2_opts
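Both middleware modules keep old import paths working by subclassing the relocated implementation and warning from __init__. A miniature imitation of that shim pattern (the decorator below only mimics, in simplified form, what versionutils.deprecated does):

    import functools
    import warnings

    def deprecated(what, in_favor_of):
        # Wrap __init__ so constructing the alias emits a warning once
        # per call site, then defers to the real implementation.
        def wrap(init):
            @functools.wraps(init)
            def inner(self, *args, **kwargs):
                warnings.warn('%s is deprecated in favor of %s'
                              % (what, in_favor_of), DeprecationWarning)
                return init(self, *args, **kwargs)
            return inner
        return wrap

    class NewImpl(object):
        def __init__(self, app=None):
            self.app = app

    class OldAlias(NewImpl):
        @deprecated(what='OldAlias', in_favor_of='NewImpl')
        def __init__(self, *args, **kwargs):
            super(OldAlias, self).__init__(*args, **kwargs)

    OldAlias()  # warns, then behaves exactly like NewImpl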
diff --git a/keystone-moon/keystone/models/__init__.py b/keystone-moon/keystone/models/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/models/token_model.py b/keystone-moon/keystone/models/token_model.py
new file mode 100644 (file)
index 0000000..3be22b9
--- /dev/null
@@ -0,0 +1,335 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unified in-memory token model."""
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_utils import timeutils
+import six
+
+from keystone.contrib import federation
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+# supported token versions
+V2 = 'v2.0'
+V3 = 'v3.0'
+VERSIONS = frozenset([V2, V3])
+
+
+def _parse_and_normalize_time(time_data):
+    if isinstance(time_data, six.string_types):
+        time_data = timeutils.parse_isotime(time_data)
+    return timeutils.normalize_time(time_data)
+
+
+class KeystoneToken(dict):
+    """An in-memory representation that unifies v2 and v3 tokens."""
+    # TODO(morganfainberg): Align this in-memory representation with the
+    # objects in keystoneclient. This object should be eventually updated
+    # to be the source of token data with the ability to emit any version
+    # of the token instead of only consuming the token dict and providing
+    # property accessors for the underlying data.
+
+    def __init__(self, token_id, token_data):
+        self.token_data = token_data
+        if 'access' in token_data:
+            super(KeystoneToken, self).__init__(**token_data['access'])
+            self.version = V2
+        elif 'token' in token_data and 'methods' in token_data['token']:
+            super(KeystoneToken, self).__init__(**token_data['token'])
+            self.version = V3
+        else:
+            raise exception.UnsupportedTokenVersionException()
+        self.token_id = token_id
+        self.short_id = cms.cms_hash_token(token_id,
+                                           mode=CONF.token.hash_algorithm)
+
+        if self.project_scoped and self.domain_scoped:
+            raise exception.UnexpectedError(_('Found invalid token: scoped to '
+                                              'both project and domain.'))
+
+    def __repr__(self):
+        desc = ('<%(type)s (audit_id=%(audit_id)s, '
+                'audit_chain_id=%(audit_chain_id)s) at %(loc)s>')
+        return desc % {'type': self.__class__.__name__,
+                       'audit_id': self.audit_id,
+                       'audit_chain_id': self.audit_chain_id,
+                       'loc': hex(id(self))}
+
+    @property
+    def expires(self):
+        if self.version is V3:
+            expires_at = self['expires_at']
+        else:
+            expires_at = self['token']['expires']
+        return _parse_and_normalize_time(expires_at)
+
+    @property
+    def issued(self):
+        if self.version is V3:
+            issued_at = self['issued_at']
+        else:
+            issued_at = self['token']['issued_at']
+        return _parse_and_normalize_time(issued_at)
+
+    @property
+    def audit_id(self):
+        if self.version is V3:
+            return self.get('audit_ids', [None])[0]
+        return self['token'].get('audit_ids', [None])[0]
+
+    @property
+    def audit_chain_id(self):
+        if self.version is V3:
+            return self.get('audit_ids', [None])[-1]
+        return self['token'].get('audit_ids', [None])[-1]
+
+    @property
+    def auth_token(self):
+        return self.token_id
+
+    @property
+    def user_id(self):
+        return self['user']['id']
+
+    @property
+    def user_name(self):
+        return self['user']['name']
+
+    @property
+    def user_domain_name(self):
+        try:
+            if self.version is V3:
+                return self['user']['domain']['name']
+            elif 'user' in self:
+                return 'Default'
+        except KeyError:
+            # Do not raise KeyError, raise UnexpectedError
+            pass
+        raise exception.UnexpectedError()
+
+    @property
+    def user_domain_id(self):
+        try:
+            if self.version is V3:
+                return self['user']['domain']['id']
+            elif 'user' in self:
+                return CONF.identity.default_domain_id
+        except KeyError:
+            # Do not raise KeyError, raise UnexpectedError
+            pass
+        raise exception.UnexpectedError()
+
+    @property
+    def domain_id(self):
+        if self.version is V3:
+            try:
+                return self['domain']['id']
+            except KeyError:
+                # Do not raise KeyError, raise UnexpectedError
+                raise exception.UnexpectedError()
+        # No domain scoped tokens in V2.
+        raise NotImplementedError()
+
+    @property
+    def domain_name(self):
+        if self.version is V3:
+            try:
+                return self['domain']['name']
+            except KeyError:
+                # Do not raise KeyError, raise UnexpectedError
+                raise exception.UnexpectedError()
+        # No domain scoped tokens in V2.
+        raise NotImplementedError()
+
+    @property
+    def project_id(self):
+        try:
+            if self.version is V3:
+                return self['project']['id']
+            else:
+                return self['token']['tenant']['id']
+        except KeyError:
+            # Do not raise KeyError, raise UnexpectedError
+            raise exception.UnexpectedError()
+
+    @property
+    def project_name(self):
+        try:
+            if self.version is V3:
+                return self['project']['name']
+            else:
+                return self['token']['tenant']['name']
+        except KeyError:
+            # Do not raise KeyError, raise UnexpectedError
+            raise exception.UnexpectedError()
+
+    @property
+    def project_domain_id(self):
+        try:
+            if self.version is V3:
+                return self['project']['domain']['id']
+            elif 'tenant' in self['token']:
+                return CONF.identity.default_domain_id
+        except KeyError:
+            # Do not raise KeyError, raise UnexpectedError
+            pass
+
+        raise exception.UnexpectedError()
+
+    @property
+    def project_domain_name(self):
+        try:
+            if self.version is V3:
+                return self['project']['domain']['name']
+            if 'tenant' in self['token']:
+                return 'Default'
+        except KeyError:
+            # Do not raise KeyError, raise UnexpectedError
+            pass
+
+        raise exception.UnexpectedError()
+
+    @property
+    def project_scoped(self):
+        if self.version is V3:
+            return 'project' in self
+        else:
+            return 'tenant' in self['token']
+
+    @property
+    def domain_scoped(self):
+        if self.version is V3:
+            return 'domain' in self
+        return False
+
+    @property
+    def scoped(self):
+        return self.project_scoped or self.domain_scoped
+
+    @property
+    def trust_id(self):
+        if self.version is V3:
+            return self.get('OS-TRUST:trust', {}).get('id')
+        else:
+            return self.get('trust', {}).get('id')
+
+    @property
+    def trust_scoped(self):
+        if self.version is V3:
+            return 'OS-TRUST:trust' in self
+        else:
+            return 'trust' in self
+
+    @property
+    def trustee_user_id(self):
+        if self.version is V3:
+            return self.get(
+                'OS-TRUST:trust', {}).get('trustee_user_id')
+        else:
+            return self.get('trust', {}).get('trustee_user_id')
+
+    @property
+    def trustor_user_id(self):
+        if self.version is V3:
+            return self.get(
+                'OS-TRUST:trust', {}).get('trustor_user_id')
+        else:
+            return self.get('trust', {}).get('trustor_user_id')
+
+    @property
+    def trust_impersonation(self):
+        if self.version is V3:
+            return self.get('OS-TRUST:trust', {}).get('impersonation')
+        else:
+            return self.get('trust', {}).get('impersonation')
+
+    @property
+    def oauth_scoped(self):
+        return 'OS-OAUTH1' in self
+
+    @property
+    def oauth_access_token_id(self):
+        if self.version is V3 and self.oauth_scoped:
+            return self['OS-OAUTH1']['access_token_id']
+        return None
+
+    @property
+    def oauth_consumer_id(self):
+        if self.version is V3 and self.oauth_scoped:
+            return self['OS-OAUTH1']['consumer_id']
+        return None
+
+    @property
+    def role_ids(self):
+        if self.version is V3:
+            return [r['id'] for r in self.get('roles', [])]
+        else:
+            return self.get('metadata', {}).get('roles', [])
+
+    @property
+    def role_names(self):
+        if self.version is V3:
+            return [r['name'] for r in self.get('roles', [])]
+        else:
+            return [r['name'] for r in self['user'].get('roles', [])]
+
+    @property
+    def bind(self):
+        if self.version is V3:
+            return self.get('bind')
+        return self.get('token', {}).get('bind')
+
+    @property
+    def is_federated_user(self):
+        try:
+            return self.version is V3 and federation.FEDERATION in self['user']
+        except KeyError:
+            raise exception.UnexpectedError()
+
+    @property
+    def federation_group_ids(self):
+        if self.is_federated_user:
+            if self.version is V3:
+                try:
+                    groups = self['user'][federation.FEDERATION].get(
+                        'groups', [])
+                    return [g['id'] for g in groups]
+                except KeyError:
+                    raise exception.UnexpectedError()
+        return []
+
+    @property
+    def federation_idp_id(self):
+        if self.version is not V3 or not self.is_federated_user:
+            return None
+        return self['user'][federation.FEDERATION]['identity_provider']['id']
+
+    @property
+    def federation_protocol_id(self):
+        if self.version is V3 and self.is_federated_user:
+            return self['user'][federation.FEDERATION]['protocol']['id']
+        return None
+
+    @property
+    def metadata(self):
+        return self.get('metadata', {})
+
+    @property
+    def methods(self):
+        if self.version is V3:
+            return self.get('methods', [])
+        return []
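+
+
+# NOTE: a minimal usage sketch; the token payload below is illustrative and
+# far smaller than a real token, and constructing a token also requires the
+# keystone configuration (e.g. CONF.token.hash_algorithm) to be loaded:
+#
+#   token_data = {'token': {'methods': ['password'],
+#                           'expires_at': '2015-06-30T00:00:00.000000Z',
+#                           'user': {'id': 'u1', 'name': 'alice',
+#                                    'domain': {'id': 'default',
+#                                               'name': 'Default'}}}}
+#   token = KeystoneToken(token_id='abc123', token_data=token_data)
+#   assert token.version == V3
+#   assert token.user_name == 'alice'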
diff --git a/keystone-moon/keystone/notifications.py b/keystone-moon/keystone/notifications.py
new file mode 100644 (file)
index 0000000..4a1cd33
--- /dev/null
@@ -0,0 +1,686 @@
+# Copyright 2013 IBM Corp.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License"); you may
+#   not use this file except in compliance with the License. You may obtain
+#   a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#   License for the specific language governing permissions and limitations
+#   under the License.
+
+"""Notifications module for OpenStack Identity Service resources"""
+
+import collections
+import inspect
+import logging
+import socket
+
+from oslo_config import cfg
+from oslo_log import log
+import oslo_messaging
+import pycadf
+from pycadf import cadftaxonomy as taxonomy
+from pycadf import cadftype
+from pycadf import credential
+from pycadf import eventfactory
+from pycadf import resource
+
+from keystone.i18n import _, _LE
+
+
+notifier_opts = [
+    cfg.StrOpt('default_publisher_id',
+               help='Default publisher_id for outgoing notifications'),
+    cfg.StrOpt('notification_format', default='basic',
+               help='Define the notification format for Identity Service '
+                    'events. A "basic" notification has information about '
+                    'the resource being operated on. A "cadf" notification '
+                    'has the same information, as well as information about '
+                    'the initiator of the event. Valid options are: basic '
+                    'and cadf'),
+]
+
+config_section = None
+list_opts = lambda: [(config_section, notifier_opts), ]
+
+LOG = log.getLogger(__name__)
+# NOTE(gyee): actions that can be notified. One must update this list whenever
+# a new action is supported.
+_ACTIONS = collections.namedtuple(
+    'NotificationActions',
+    'created, deleted, disabled, updated, internal')
+ACTIONS = _ACTIONS(created='created', deleted='deleted', disabled='disabled',
+                   updated='updated', internal='internal')
+
+CADF_TYPE_MAP = {
+    'group': taxonomy.SECURITY_GROUP,
+    'project': taxonomy.SECURITY_PROJECT,
+    'role': taxonomy.SECURITY_ROLE,
+    'user': taxonomy.SECURITY_ACCOUNT_USER,
+    'domain': taxonomy.SECURITY_DOMAIN,
+    'region': taxonomy.SECURITY_REGION,
+    'endpoint': taxonomy.SECURITY_ENDPOINT,
+    'service': taxonomy.SECURITY_SERVICE,
+    'policy': taxonomy.SECURITY_POLICY,
+    'OS-TRUST:trust': taxonomy.SECURITY_TRUST,
+    'OS-OAUTH1:access_token': taxonomy.SECURITY_CREDENTIAL,
+    'OS-OAUTH1:request_token': taxonomy.SECURITY_CREDENTIAL,
+    'OS-OAUTH1:consumer': taxonomy.SECURITY_ACCOUNT,
+}
+
+SAML_AUDIT_TYPE = 'http://docs.oasis-open.org/security/saml/v2.0'
+# resource types that can be notified
+_SUBSCRIBERS = {}
+_notifier = None
+SERVICE = 'identity'
+
+
+CONF = cfg.CONF
+CONF.register_opts(notifier_opts)
+
+# NOTE(morganfainberg): Special case notifications that are only used
+# internally for handling token persistence token deletions
+INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens'
+INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE = 'invalidate_user_project_tokens'
+INVALIDATE_USER_OAUTH_CONSUMER_TOKENS = 'invalidate_user_consumer_tokens'
+
+
+class Audit(object):
+    """Namespace for audit notification functions.
+
+    This is a namespace object to contain all of the direct notification
+    functions utilized for ``Manager`` methods.
+    """
+
+    @classmethod
+    def _emit(cls, operation, resource_type, resource_id, initiator, public):
+        """Directly send an event notification.
+
+        :param operation: one of the values from ACTIONS
+        :param resource_type: type of resource being affected
+        :param resource_id: ID of the resource affected
+        :param initiator: CADF representation of the user that created the
+                          request
+        :param public: if True (default), the event will be sent to the
+                       notifier API. If False, the event will only be sent via
+                       notify_event_callbacks to in-process listeners.
+        """
+        # NOTE(stevemar): the _send_notification function is
+        # overloaded, it's used to register callbacks and to actually
+        # send the notification externally. Thus, we should check
+        # the desired notification format in the function instead
+        # of before it.
+        _send_notification(
+            operation,
+            resource_type,
+            resource_id,
+            public=public)
+
+        if CONF.notification_format == 'cadf' and public:
+            outcome = taxonomy.OUTCOME_SUCCESS
+            _create_cadf_payload(operation, resource_type, resource_id,
+                                 outcome, initiator)
+
+    @classmethod
+    def created(cls, resource_type, resource_id, initiator=None,
+                public=True):
+        cls._emit(ACTIONS.created, resource_type, resource_id, initiator,
+                  public)
+
+    @classmethod
+    def updated(cls, resource_type, resource_id, initiator=None,
+                public=True):
+        cls._emit(ACTIONS.updated, resource_type, resource_id, initiator,
+                  public)
+
+    @classmethod
+    def disabled(cls, resource_type, resource_id, initiator=None,
+                 public=True):
+        cls._emit(ACTIONS.disabled, resource_type, resource_id, initiator,
+                  public)
+
+    @classmethod
+    def deleted(cls, resource_type, resource_id, initiator=None,
+                public=True):
+        cls._emit(ACTIONS.deleted, resource_type, resource_id, initiator,
+                  public)
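+
+    # NOTE: illustrative call sites; ``user_ref``, ``project_id`` and
+    # ``initiator`` are assumed to come from the surrounding Manager code:
+    #
+    #   Audit.created('user', user_ref['id'], initiator)
+    #   Audit.deleted('project', project_id, initiator, public=False)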
+
+
+class ManagerNotificationWrapper(object):
+    """Send event notifications for ``Manager`` methods.
+
+    Sends a notification if the wrapped Manager method does not raise an
+    ``Exception`` (such as ``keystone.exception.NotFound``).
+
+    :param operation: one of the values from ACTIONS
+    :param resource_type: type of resource being affected
+    :param public: if True (default), the event will be sent to the notifier
+                   API. If False, the event will only be sent via
+                   notify_event_callbacks to in-process listeners.
+
+    """
+    def __init__(self, operation, resource_type, public=True,
+                 resource_id_arg_index=1, result_id_arg_attr=None):
+        self.operation = operation
+        self.resource_type = resource_type
+        self.public = public
+        self.resource_id_arg_index = resource_id_arg_index
+        self.result_id_arg_attr = result_id_arg_attr
+
+    def __call__(self, f):
+        def wrapper(*args, **kwargs):
+            """Send a notification if the wrapped callable is successful."""
+            try:
+                result = f(*args, **kwargs)
+            except Exception:
+                raise
+            else:
+                if self.result_id_arg_attr is not None:
+                    resource_id = result[self.result_id_arg_attr]
+                else:
+                    resource_id = args[self.resource_id_arg_index]
+
+                # NOTE(stevemar): the _send_notification function is
+                # overloaded, it's used to register callbacks and to actually
+                # send the notification externally. Thus, we should check
+                # the desired notification format in the function instead
+                # of before it.
+                _send_notification(
+                    self.operation,
+                    self.resource_type,
+                    resource_id,
+                    public=self.public)
+
+                # Only emit CADF notifications for public events
+                if CONF.notification_format == 'cadf' and self.public:
+                    outcome = taxonomy.OUTCOME_SUCCESS
+                    # NOTE(morganfainberg): The decorator form will always use
+                    # a 'None' initiator, since we do not pass context around
+                    # in a manner that allows the decorator to inspect context
+                    # and extract the needed information.
+                    initiator = None
+                    _create_cadf_payload(self.operation, self.resource_type,
+                                         resource_id, outcome, initiator)
+            return result
+
+        return wrapper
+
+
+def created(*args, **kwargs):
+    """Decorator to send notifications for ``Manager.create_*`` methods."""
+    return ManagerNotificationWrapper(ACTIONS.created, *args, **kwargs)
+
+
+def updated(*args, **kwargs):
+    """Decorator to send notifications for ``Manager.update_*`` methods."""
+    return ManagerNotificationWrapper(ACTIONS.updated, *args, **kwargs)
+
+
+def disabled(*args, **kwargs):
+    """Decorator to send notifications when an object is disabled."""
+    return ManagerNotificationWrapper(ACTIONS.disabled, *args, **kwargs)
+
+
+def deleted(*args, **kwargs):
+    """Decorator to send notifications for ``Manager.delete_*`` methods."""
+    return ManagerNotificationWrapper(ACTIONS.deleted, *args, **kwargs)
+
+
+def internal(*args, **kwargs):
+    """Decorator to send notifications for internal notifications only."""
+    kwargs['public'] = False
+    return ManagerNotificationWrapper(ACTIONS.internal, *args, **kwargs)
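+
+
+# NOTE: illustrative decorator usage; ``Manager`` and its method body are
+# assumed. By default the resource ID is taken from positional argument 1
+# (resource_id_arg_index=1, i.e. the first argument after ``self``):
+#
+#   class Manager(object):
+#       @created('user')
+#       def create_user(self, user_id, user_ref):
+#           ...  # persist the user; the notification fires only on success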
+
+
+def _get_callback_info(callback):
+    """Return list containing callback's module and name.
+
+    If the callback is an instance method, also return the class name.
+
+    :param callback: Function to call
+    :type callback: function
+    :returns: List containing parent module, (optional class,) function name
+    :rtype: list
+    """
+    if getattr(callback, 'im_class', None):
+        return [getattr(callback, '__module__', None),
+                callback.im_class.__name__,
+                callback.__name__]
+    else:
+        return [getattr(callback, '__module__', None), callback.__name__]
+
+
+def register_event_callback(event, resource_type, callbacks):
+    """Register each callback with the event.
+
+    :param event: Action being registered
+    :type event: keystone.notifications.ACTIONS
+    :param resource_type: Type of resource being operated on
+    :type resource_type: str
+    :param callbacks: Callback items to be registered with event
+    :type callbacks: list
+    :raises ValueError: If event is not a valid ACTION
+    :raises TypeError: If callback is not callable
+    """
+    if event not in ACTIONS:
+        raise ValueError(_('%(event)s is not a valid notification event, must '
+                           'be one of: %(actions)s') %
+                         {'event': event, 'actions': ', '.join(ACTIONS)})
+
+    if not hasattr(callbacks, '__iter__'):
+        callbacks = [callbacks]
+
+    for callback in callbacks:
+        if not callable(callback):
+            msg = _('Method not callable: %s') % callback
+            LOG.error(msg)
+            raise TypeError(msg)
+        _SUBSCRIBERS.setdefault(event, {}).setdefault(resource_type, set())
+        _SUBSCRIBERS[event][resource_type].add(callback)
+
+        if LOG.logger.getEffectiveLevel() <= logging.DEBUG:
+            # Do this only if it's going to appear in the logs.
+            msg = 'Callback: `%(callback)s` subscribed to event `%(event)s`.'
+            callback_info = _get_callback_info(callback)
+            callback_str = '.'.join(i for i in callback_info if i is not None)
+            event_str = '.'.join(['identity', resource_type, event])
+            LOG.debug(msg, {'callback': callback_str, 'event': event_str})
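+
+
+# NOTE: sketch of registering a callback; the callback body is assumed:
+#
+#   def on_project_deleted(service, resource_type, operation, payload):
+#       ...  # react to identity.project.deleted events
+#
+#   register_event_callback(ACTIONS.deleted, 'project', on_project_deleted)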
+
+
+def notify_event_callbacks(service, resource_type, operation, payload):
+    """Sends a notification to registered extensions."""
+    if operation in _SUBSCRIBERS:
+        if resource_type in _SUBSCRIBERS[operation]:
+            for cb in _SUBSCRIBERS[operation][resource_type]:
+                subst_dict = {'cb_name': cb.__name__,
+                              'service': service,
+                              'resource_type': resource_type,
+                              'operation': operation,
+                              'payload': payload}
+                LOG.debug('Invoking callback %(cb_name)s for event '
+                          '%(service)s %(resource_type)s %(operation)s for '
+                          '%(payload)s', subst_dict)
+                cb(service, resource_type, operation, payload)
+
+
+def _get_notifier():
+    """Return a notifier object.
+
+    If _notifier is None it means that a notifier object has not been set.
+    If _notifier is False it means that a notifier has previously failed to
+    construct.
+    Otherwise it is a constructed Notifier object.
+    """
+    global _notifier
+
+    if _notifier is None:
+        host = CONF.default_publisher_id or socket.gethostname()
+        try:
+            transport = oslo_messaging.get_transport(CONF)
+            _notifier = oslo_messaging.Notifier(transport,
+                                                "identity.%s" % host)
+        except Exception:
+            LOG.exception(_LE("Failed to construct notifier"))
+            _notifier = False
+
+    return _notifier
+
+
+def clear_subscribers():
+    """Empty subscribers dictionary.
+
+    This effectively stops notifications since there will be no subscribers
+    to publish to.
+    """
+    _SUBSCRIBERS.clear()
+
+
+def reset_notifier():
+    """Reset the notifications internal state.
+
+    This is used only for testing purposes.
+
+    """
+    global _notifier
+    _notifier = None
+
+
+def _create_cadf_payload(operation, resource_type, resource_id,
+                         outcome, initiator):
+    """Prepare data for CADF audit notifier.
+
+    Transform the arguments into content to be consumed by the function that
+    emits CADF events (_send_audit_notification). Specifically the
+    ``resource_type`` (role, user, etc) must be transformed into a CADF
+    keyword, such as: ``data/security/role``. The ``resource_id`` is added as a
+    top level value for the ``resource_info`` key. Lastly, the ``operation`` is
+    used to create the CADF ``action``, and the ``event_type`` name.
+
+    As per the CADF specification, the ``action`` must start with the
+    operation (created, updated, deleted, etc.), e.g. created.user or
+    deleted.role.
+
+    However, the ``event_type`` is an OpenStack-ism, typically of the
+    form service.resource_type.operation, e.g. identity.project.updated.
+
+    :param operation: operation being performed (created, updated, or deleted)
+    :param resource_type: type of resource being operated on (role, user, etc)
+    :param resource_id: ID of resource being operated on
+    :param outcome: outcomes of the operation (SUCCESS, FAILURE, etc)
+    :param initiator: CADF representation of the user that created the request
+    """
+
+    target_uri = CADF_TYPE_MAP.get(resource_type, taxonomy.UNKNOWN)
+    target = resource.Resource(typeURI=target_uri,
+                               id=resource_id)
+
+    audit_kwargs = {'resource_info': resource_id}
+    cadf_action = '%s.%s' % (operation, resource_type)
+    event_type = '%s.%s.%s' % (SERVICE, resource_type, operation)
+
+    _send_audit_notification(cadf_action, initiator, outcome,
+                             target, event_type, **audit_kwargs)
+
+
+def _send_notification(operation, resource_type, resource_id, public=True):
+    """Send notification to inform observers about the affected resource.
+
+    This method doesn't raise an exception when sending the notification fails.
+
+    :param operation: operation being performed (created, updated, or deleted)
+    :param resource_type: type of resource being operated on
+    :param resource_id: ID of resource being operated on
+    :param public: if True (default), the event will be sent to the
+                   notifier API. If False, the event will only be sent via
+                   notify_event_callbacks to in-process listeners.
+    """
+    payload = {'resource_info': resource_id}
+
+    notify_event_callbacks(SERVICE, resource_type, operation, payload)
+
+    # Only send this notification if the 'basic' format is used, otherwise
+    # let the CADF functions handle sending the notification. But we check
+    # here so as to not disrupt the notify_event_callbacks function.
+    if public and CONF.notification_format == 'basic':
+        notifier = _get_notifier()
+        if notifier:
+            context = {}
+            event_type = '%(service)s.%(resource_type)s.%(operation)s' % {
+                'service': SERVICE,
+                'resource_type': resource_type,
+                'operation': operation}
+            try:
+                notifier.info(context, event_type, payload)
+            except Exception:
+                LOG.exception(_LE(
+                    'Failed to send %(res_id)s %(event_type)s notification'),
+                    {'res_id': resource_id, 'event_type': event_type})
+
+
+def _get_request_audit_info(context, user_id=None):
+    """Collect audit information about the request used for CADF.
+
+    :param context: Request context
+    :param user_id: Optional user ID, alternatively collected from context
+    :returns: Auditing data about the request
+    :rtype: :class:`pycadf.resource.Resource`
+    """
+
+    remote_addr = None
+    http_user_agent = None
+    project_id = None
+    domain_id = None
+
+    if context and 'environment' in context and context['environment']:
+        environment = context['environment']
+        remote_addr = environment.get('REMOTE_ADDR')
+        http_user_agent = environment.get('HTTP_USER_AGENT')
+        if not user_id:
+            user_id = environment.get('KEYSTONE_AUTH_CONTEXT',
+                                      {}).get('user_id')
+        project_id = environment.get('KEYSTONE_AUTH_CONTEXT',
+                                     {}).get('project_id')
+        domain_id = environment.get('KEYSTONE_AUTH_CONTEXT',
+                                    {}).get('domain_id')
+
+    host = pycadf.host.Host(address=remote_addr, agent=http_user_agent)
+    initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER,
+                                  id=user_id, host=host)
+    if project_id:
+        initiator.project_id = project_id
+    if domain_id:
+        initiator.domain_id = domain_id
+
+    return initiator
+
+
+class CadfNotificationWrapper(object):
+    """Send CADF event notifications for various methods.
+
+    This wrapper is only used for authentication events. Its ``action`` and
+    ``event_type`` are dictated below.
+
+    - action: authenticate
+    - event_type: identity.authenticate
+
+    Sends a CADF notification recording whether the authentication attempt
+    succeeded or failed.
+
+    :param operation: The authentication related action being performed
+
+    """
+
+    def __init__(self, operation):
+        self.action = operation
+        self.event_type = '%s.%s' % (SERVICE, operation)
+
+    def __call__(self, f):
+        def wrapper(wrapped_self, context, user_id, *args, **kwargs):
+            """Always send a notification."""
+
+            initiator = _get_request_audit_info(context, user_id)
+            target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER)
+            try:
+                result = f(wrapped_self, context, user_id, *args, **kwargs)
+            except Exception:
+                # For authentication failure send a cadf event as well
+                _send_audit_notification(self.action, initiator,
+                                         taxonomy.OUTCOME_FAILURE,
+                                         target, self.event_type)
+                raise
+            else:
+                _send_audit_notification(self.action, initiator,
+                                         taxonomy.OUTCOME_SUCCESS,
+                                         target, self.event_type)
+                return result
+
+        return wrapper
+
+
+class CadfRoleAssignmentNotificationWrapper(object):
+    """Send CADF notifications for ``role_assignment`` methods.
+
+    This wrapper is only used for role assignment events. Its ``action`` and
+    ``event_type`` are dictated below.
+
+    - action: created.role_assignment or deleted.role_assignment
+    - event_type: identity.role_assignment.created or
+        identity.role_assignment.deleted
+
+    Sends a CADF notification if the wrapped method does not raise an
+    ``Exception`` (such as ``keystone.exception.NotFound``).
+
+    :param operation: one of the values from ACTIONS (create or delete)
+    """
+
+    ROLE_ASSIGNMENT = 'role_assignment'
+
+    def __init__(self, operation):
+        self.action = '%s.%s' % (operation, self.ROLE_ASSIGNMENT)
+        self.event_type = '%s.%s.%s' % (SERVICE, operation,
+                                        self.ROLE_ASSIGNMENT)
+
+    def __call__(self, f):
+        def wrapper(wrapped_self, role_id, *args, **kwargs):
+            """Send a notification if the wrapped callable is successful."""
+
+            """ NOTE(stevemar): The reason we go through checking kwargs
+            and args for possible target and actor values is because the
+            create_grant() (and delete_grant()) method are called
+            differently in various tests.
+            Using named arguments, i.e.:
+                create_grant(user_id=user['id'], domain_id=domain['id'],
+                             role_id=role['id'])
+
+            Or, using positional arguments, i.e.:
+                create_grant(role_id['id'], user['id'], None,
+                             domain_id=domain['id'], None)
+
+            Or, both, i.e.:
+                create_grant(role_id['id'], user_id=user['id'],
+                             domain_id=domain['id'])
+
+            Checking the values for kwargs is easy enough, since it comes
+            in as a dictionary
+
+            The actual method signature is
+                create_grant(role_id, user_id=None, group_id=None,
+                             domain_id=None, project_id=None,
+                             inherited_to_projects=False)
+
+            So, if the values of actor or target are still None after
+            checking kwargs, we can check the positional arguments,
+            based on the method signature.
+            """
+            call_args = inspect.getcallargs(
+                f, wrapped_self, role_id, *args, **kwargs)
+            inherited = call_args['inherited_to_projects']
+            context = call_args['context']
+
+            initiator = _get_request_audit_info(context)
+            target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER)
+
+            audit_kwargs = {}
+            if call_args['project_id']:
+                audit_kwargs['project'] = call_args['project_id']
+            elif call_args['domain_id']:
+                audit_kwargs['domain'] = call_args['domain_id']
+
+            if call_args['user_id']:
+                audit_kwargs['user'] = call_args['user_id']
+            elif call_args['group_id']:
+                audit_kwargs['group'] = call_args['group_id']
+
+            audit_kwargs['inherited_to_projects'] = inherited
+            audit_kwargs['role'] = role_id
+
+            try:
+                result = f(wrapped_self, role_id, *args, **kwargs)
+            except Exception:
+                _send_audit_notification(self.action, initiator,
+                                         taxonomy.OUTCOME_FAILURE,
+                                         target, self.event_type,
+                                         **audit_kwargs)
+                raise
+            else:
+                _send_audit_notification(self.action, initiator,
+                                         taxonomy.OUTCOME_SUCCESS,
+                                         target, self.event_type,
+                                         **audit_kwargs)
+                return result
+
+        return wrapper
+
+
+def send_saml_audit_notification(action, context, user_id, group_ids,
+                                 identity_provider, protocol, token_id,
+                                 outcome):
+    """Send notification to inform observers about SAML events.
+
+    :param action: Action being audited
+    :type action: str
+    :param context: Current request context to collect request info from
+    :type context: dict
+    :param user_id: User ID from Keystone token
+    :type user_id: str
+    :param group_ids: List of Group IDs from Keystone token
+    :type group_ids: list
+    :param identity_provider: ID of the IdP from the Keystone token
+    :type identity_provider: str or None
+    :param protocol: Protocol ID for IdP from the Keystone token
+    :type protocol: str
+    :param token_id: audit_id from Keystone token
+    :type token_id: str or None
+    :param outcome: One of the outcome constants defined in
+        :mod:`pycadf.cadftaxonomy`
+    :type outcome: str
+    """
+
+    initiator = _get_request_audit_info(context)
+    target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER)
+    audit_type = SAML_AUDIT_TYPE
+    user_id = user_id or taxonomy.UNKNOWN
+    token_id = token_id or taxonomy.UNKNOWN
+    group_ids = group_ids or []
+    cred = credential.FederatedCredential(token=token_id, type=audit_type,
+                                          identity_provider=identity_provider,
+                                          user=user_id, groups=group_ids)
+    initiator.credential = cred
+    event_type = '%s.%s' % (SERVICE, action)
+    _send_audit_notification(action, initiator, outcome, target, event_type)
+
+
+def _send_audit_notification(action, initiator, outcome, target,
+                             event_type, **kwargs):
+    """Send CADF notification to inform observers about the affected resource.
+
+    This method logs an exception when sending the notification fails.
+
+    :param action: CADF action being audited (e.g., 'authenticate')
+    :param initiator: CADF resource representing the initiator
+    :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING,
+        taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE)
+    :param target: CADF resource representing the target
+    :param event_type: An OpenStack-ism; typically the meter name that
+        Ceilometer uses to poll for events.
+    :param kwargs: Any additional arguments passed in will be added as
+        key-value pairs to the CADF event.
+
+    """
+
+    event = eventfactory.EventFactory().new_event(
+        eventType=cadftype.EVENTTYPE_ACTIVITY,
+        outcome=outcome,
+        action=action,
+        initiator=initiator,
+        target=target,
+        observer=resource.Resource(typeURI=taxonomy.SERVICE_SECURITY))
+
+    for key, value in kwargs.items():
+        setattr(event, key, value)
+
+    context = {}
+    payload = event.as_dict()
+    notifier = _get_notifier()
+
+    if notifier:
+        try:
+            notifier.info(context, event_type, payload)
+        except Exception:
+            # diaper defense: any exception that occurs while emitting the
+            # notification should not interfere with the API request
+            LOG.exception(_LE(
+                'Failed to send %(action)s %(event_type)s notification'),
+                {'action': action, 'event_type': event_type})
+
+
+emit_event = CadfNotificationWrapper
+
+
+role_assignment = CadfRoleAssignmentNotificationWrapper
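+
+
+# NOTE: illustrative use of the two aliases above; the decorated methods are
+# assumed and not defined in this module:
+#
+#   class Auth(object):
+#       @emit_event('authenticate')
+#       def authenticate(self, context, user_id, password):
+#           ...  # a CADF event is sent on both success and failure
+#
+#   class AssignmentManager(object):
+#       @role_assignment('created')
+#       def create_grant(self, role_id, user_id=None, group_id=None,
+#                        domain_id=None, project_id=None,
+#                        inherited_to_projects=False, context=None):
+#           ...  # wrapper inspects these arguments to build the audit event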
diff --git a/keystone-moon/keystone/openstack/__init__.py b/keystone-moon/keystone/openstack/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/openstack/common/README b/keystone-moon/keystone/openstack/common/README
new file mode 100644 (file)
index 0000000..0700c72
--- /dev/null
@@ -0,0 +1,13 @@
+openstack-common
+----------------
+
+A number of modules from openstack-common are imported into this project.
+
+These modules are "incubating" in openstack-common and are kept in sync
+with the help of openstack-common's update.py script. See:
+
+  https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator
+
+The copy of the code should never be directly modified here. Please
+always update openstack-common first and then run the script to copy
+the changes across.
diff --git a/keystone-moon/keystone/openstack/common/__init__.py b/keystone-moon/keystone/openstack/common/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/openstack/common/_i18n.py b/keystone-moon/keystone/openstack/common/_i18n.py
new file mode 100644 (file)
index 0000000..76a74c0
--- /dev/null
@@ -0,0 +1,45 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""oslo.i18n integration module.
+
+See http://docs.openstack.org/developer/oslo.i18n/usage.html
+
+"""
+
+try:
+    import oslo_i18n
+
+    # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
+    # application name when this module is synced into the separate
+    # repository. It is OK to have more than one translation function
+    # using the same domain, since there will still only be one message
+    # catalog.
+    _translators = oslo_i18n.TranslatorFactory(domain='keystone')
+
+    # The primary translation function using the well-known name "_"
+    _ = _translators.primary
+
+    # Translators for log levels.
+    #
+    # The abbreviated names are meant to reflect the usual use of a short
+    # name like '_'. The "L" is for "log" and the other letter comes from
+    # the level.
+    _LI = _translators.log_info
+    _LW = _translators.log_warning
+    _LE = _translators.log_error
+    _LC = _translators.log_critical
+except ImportError:
+    # NOTE(dims): Support for cases where a project wants to use
+    # code from oslo-incubator, but is not ready to be internationalized
+    # (like tempest)
+    _ = _LI = _LW = _LE = _LC = lambda x: x
diff --git a/keystone-moon/keystone/openstack/common/eventlet_backdoor.py b/keystone-moon/keystone/openstack/common/eventlet_backdoor.py
new file mode 100644 (file)
index 0000000..c656d81
--- /dev/null
@@ -0,0 +1,151 @@
+# Copyright (c) 2012 OpenStack Foundation.
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from __future__ import print_function
+
+import copy
+import errno
+import gc
+import logging
+import os
+import pprint
+import socket
+import sys
+import traceback
+
+import eventlet.backdoor
+import greenlet
+from oslo_config import cfg
+
+from keystone.openstack.common._i18n import _LI
+
+help_for_backdoor_port = (
+    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
+    "in listening on a random tcp port number; <port> results in listening "
+    "on the specified port number (and not enabling backdoor if that port "
+    "is in use); and <start>:<end> results in listening on the smallest "
+    "unused port number within the specified range of port numbers.  The "
+    "chosen port is displayed in the service's log file.")
+eventlet_backdoor_opts = [
+    cfg.StrOpt('backdoor_port',
+               help="Enable eventlet backdoor.  %s" % help_for_backdoor_port)
+]
+
+CONF = cfg.CONF
+CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+
+
+def list_opts():
+    """Entry point for oslo-config-generator.
+    """
+    return [(None, copy.deepcopy(eventlet_backdoor_opts))]
+
+
+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range
+
+
+def _dont_use_this():
+    print("Don't use this, just disconnect instead")
+
+
+def _find_objects(t):
+    return [o for o in gc.get_objects() if isinstance(o, t)]
+
+
+def _print_greenthreads():
+    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
+        print(i, gt)
+        traceback.print_stack(gt.gr_frame)
+        print()
+
+
+def _print_nativethreads():
+    for threadId, stack in sys._current_frames().items():
+        print(threadId)
+        traceback.print_stack(stack)
+        print()
+
+
+def _parse_port_range(port_range):
+    if ':' not in port_range:
+        start, end = port_range, port_range
+    else:
+        start, end = port_range.split(':', 1)
+    try:
+        start, end = int(start), int(end)
+        if end < start:
+            raise ValueError
+        return start, end
+    except ValueError as ex:
+        raise EventletBackdoorConfigValueError(port_range,
+                                               help_for_backdoor_port, ex)
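+
+
+# NOTE: worked examples of the accepted formats described in
+# help_for_backdoor_port:
+#
+#   _parse_port_range('0')         -> (0, 0)        # random port
+#   _parse_port_range('8000')      -> (8000, 8000)  # fixed port
+#   _parse_port_range('8000:9000') -> (8000, 9000)  # first free in range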
+
+
+def _listen(host, start_port, end_port, listen_func):
+    try_port = start_port
+    while True:
+        try:
+            return listen_func((host, try_port))
+        except socket.error as exc:
+            if (exc.errno != errno.EADDRINUSE or
+               try_port >= end_port):
+                raise
+            try_port += 1
+
+
+def initialize_if_enabled():
+    backdoor_locals = {
+        'exit': _dont_use_this,      # So we don't exit the entire process
+        'quit': _dont_use_this,      # So we don't exit the entire process
+        'fo': _find_objects,
+        'pgt': _print_greenthreads,
+        'pnt': _print_nativethreads,
+    }
+
+    if CONF.backdoor_port is None:
+        return None
+
+    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
+    # NOTE(johannes): The standard sys.displayhook will print the value of
+    # the last expression and set it to __builtin__._, which overwrites
+    # the __builtin__._ that gettext sets. Let's switch to using pprint
+    # since it won't interact poorly with gettext, and it's easier to
+    # read the output too.
+    def displayhook(val):
+        if val is not None:
+            pprint.pprint(val)
+    sys.displayhook = displayhook
+
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)
+
+    # In the case of backdoor port being zero, a port number is assigned by
+    # listen().  In any case, pull the port number out here.
+    port = sock.getsockname()[1]
+    LOG.info(
+        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+        {'port': port, 'pid': os.getpid()}
+    )
+    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
+                     locals=backdoor_locals)
+    return port
diff --git a/keystone-moon/keystone/openstack/common/fileutils.py b/keystone-moon/keystone/openstack/common/fileutils.py
new file mode 100644 (file)
index 0000000..9097c35
--- /dev/null
@@ -0,0 +1,149 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import contextlib
+import errno
+import logging
+import os
+import stat
+import tempfile
+
+from oslo_utils import excutils
+
+LOG = logging.getLogger(__name__)
+
+_FILE_CACHE = {}
+DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
+
+
+def ensure_tree(path, mode=DEFAULT_MODE):
+    """Create a directory (and any ancestor directories required)
+
+    :param path: Directory to create
+    :param mode: Directory creation permissions
+    """
+    try:
+        os.makedirs(path, mode)
+    except OSError as exc:
+        if exc.errno == errno.EEXIST:
+            if not os.path.isdir(path):
+                raise
+        else:
+            raise
+
+
+def read_cached_file(filename, force_reload=False):
+    """Read from a file if it has been modified.
+
+    :param filename: File to read from.
+    :param force_reload: Whether to reload the file regardless of its mtime.
+    :returns: A tuple (reloaded, data), where ``reloaded`` indicates whether
+              the file was re-read and ``data`` is the file contents.
+    """
+    global _FILE_CACHE
+
+    if force_reload:
+        delete_cached_file(filename)
+
+    reloaded = False
+    mtime = os.path.getmtime(filename)
+    cache_info = _FILE_CACHE.setdefault(filename, {})
+
+    if not cache_info or mtime > cache_info.get('mtime', 0):
+        LOG.debug("Reloading cached file %s" % filename)
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        reloaded = True
+    return (reloaded, cache_info['data'])
+
+
+def delete_cached_file(filename):
+    """Delete cached file if present.
+
+    :param filename: filename to delete
+    """
+    global _FILE_CACHE
+
+    if filename in _FILE_CACHE:
+        del _FILE_CACHE[filename]
+
+
+def delete_if_exists(path, remove=os.unlink):
+    """Delete a file, but ignore file not found error.
+
+    :param path: File to delete
+    :param remove: Optional function to remove passed path
+    """
+
+    try:
+        remove(path)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path, remove=delete_if_exists):
+    """Protect code that wants to operate on PATH atomically.
+    Any exception will cause PATH to be removed.
+
+    :param path: File to work with
+    :param remove: Optional function to remove passed path
+    """
+
+    try:
+        yield
+    except Exception:
+        with excutils.save_and_reraise_exception():
+            remove(path)
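+
+
+# NOTE: illustrative use; ``fetch_image`` is a hypothetical helper that
+# writes to the given path:
+#
+#   with remove_path_on_error('/tmp/image.part'):
+#       fetch_image('/tmp/image.part')  # path is removed if this raises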
+
+
+def file_open(*args, **kwargs):
+    """Open file
+
+    see built-in open() documentation for more details
+
+    Note: The reason this is kept in a separate module is to easily
+    be able to provide a stub module that doesn't alter system
+    state at all (for unit tests)
+    """
+    return open(*args, **kwargs)
+
+
+def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
+    """Create temporary file or use existing file.
+
+    This util is needed for creating temporary file with
+    specified content, suffix and prefix. If path is not None,
+    it will be used for writing content. If the path doesn't
+    exist it'll be created.
+
+    :param content: content for temporary file.
+    :param path: same as parameter 'dir' for mkstemp
+    :param suffix: same as parameter 'suffix' for mkstemp
+    :param prefix: same as parameter 'prefix' for mkstemp
+
+    For example: it can be used in database tests for creating
+    configuration files.
+    """
+    if path:
+        ensure_tree(path)
+
+    (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
+    try:
+        os.write(fd, content)
+    finally:
+        os.close(fd)
+    return path
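+
+
+# NOTE: minimal usage sketch:
+#
+#   path = write_to_tempfile(b'[DEFAULT]\ndebug = True\n', suffix='.conf')
+#   # ... use the generated config file, then clean it up:
+#   delete_if_exists(path)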
diff --git a/keystone-moon/keystone/openstack/common/loopingcall.py b/keystone-moon/keystone/openstack/common/loopingcall.py
new file mode 100644 (file)
index 0000000..39eed47
--- /dev/null
@@ -0,0 +1,147 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+import sys
+import time
+
+from eventlet import event
+from eventlet import greenthread
+
+from keystone.openstack.common._i18n import _LE, _LW
+
+LOG = logging.getLogger(__name__)
+
+# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
+#              with time.time() called in the standard logging module
+#              during unittests.
+_ts = lambda: time.time()
+
+
+class LoopingCallDone(Exception):
+    """Exception to break out and stop a LoopingCallBase.
+
+    The poll-function passed to LoopingCallBase can raise this exception to
+    break out of the loop normally. This is somewhat analogous to
+    StopIteration.
+
+    An optional return-value can be included as the argument to the exception;
+    this return-value will be returned by LoopingCallBase.wait()
+
+    """
+
+    def __init__(self, retvalue=True):
+        """:param retvalue: Value that LoopingCallBase.wait() should return."""
+        self.retvalue = retvalue
+
+
+class LoopingCallBase(object):
+    def __init__(self, f=None, *args, **kw):
+        self.args = args
+        self.kw = kw
+        self.f = f
+        self._running = False
+        self.done = None
+
+    def stop(self):
+        self._running = False
+
+    def wait(self):
+        return self.done.wait()
+
+
+class FixedIntervalLoopingCall(LoopingCallBase):
+    """A fixed interval looping call."""
+
+    def start(self, interval, initial_delay=None):
+        self._running = True
+        done = event.Event()
+
+        def _inner():
+            if initial_delay:
+                greenthread.sleep(initial_delay)
+
+            try:
+                while self._running:
+                    start = _ts()
+                    self.f(*self.args, **self.kw)
+                    end = _ts()
+                    if not self._running:
+                        break
+                    delay = end - start - interval
+                    if delay > 0:
+                        LOG.warning(_LW('task %(func_name)r run outlasted '
+                                        'interval by %(delay).2f sec'),
+                                    {'func_name': self.f, 'delay': delay})
+                    greenthread.sleep(-delay if delay < 0 else 0)
+            except LoopingCallDone as e:
+                self.stop()
+                done.send(e.retvalue)
+            except Exception:
+                LOG.exception(_LE('in fixed duration looping call'))
+                done.send_exception(*sys.exc_info())
+                return
+            else:
+                done.send(True)
+
+        self.done = done
+
+        greenthread.spawn_n(_inner)
+        return self.done
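+
+
+# NOTE: minimal usage sketch; the task function is assumed:
+#
+#   def _report_state():
+#       LOG.debug('heartbeat')
+#
+#   timer = FixedIntervalLoopingCall(_report_state)
+#   timer.start(interval=60, initial_delay=5)
+#   # later, from another greenthread: timer.stop(); timer.wait()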
+
+
+class DynamicLoopingCall(LoopingCallBase):
+    """A looping call which sleeps until the next known event.
+
+    The function called should return how long to sleep for before being
+    called again.
+    """
+
+    def start(self, initial_delay=None, periodic_interval_max=None):
+        self._running = True
+        done = event.Event()
+
+        def _inner():
+            if initial_delay:
+                greenthread.sleep(initial_delay)
+
+            try:
+                while self._running:
+                    idle = self.f(*self.args, **self.kw)
+                    if not self._running:
+                        break
+
+                    if periodic_interval_max is not None:
+                        idle = min(idle, periodic_interval_max)
+                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
+                              'for %(idle).02f seconds',
+                              {'func_name': self.f, 'idle': idle})
+                    greenthread.sleep(idle)
+            except LoopingCallDone as e:
+                self.stop()
+                done.send(e.retvalue)
+            except Exception:
+                LOG.exception(_LE('in dynamic looping call'))
+                done.send_exception(*sys.exc_info())
+                return
+            else:
+                done.send(True)
+
+        self.done = done
+
+        greenthread.spawn(_inner)
+        return self.done
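+
+
+# NOTE: sketch of a dynamic call; the callee returns the number of seconds
+# to sleep before it is invoked again:
+#
+#   def _poll_for_work():
+#       ...  # do some work
+#       return 30
+#
+#   DynamicLoopingCall(_poll_for_work).start(periodic_interval_max=300)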
diff --git a/keystone-moon/keystone/openstack/common/service.py b/keystone-moon/keystone/openstack/common/service.py
new file mode 100644 (file)
index 0000000..cfae56b
--- /dev/null
@@ -0,0 +1,495 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Generic Node base class for all workers that run on hosts."""
+
+import errno
+import logging
+import os
+import random
+import signal
+import sys
+import time
+
+try:
+    # Importing just the symbol here because the io module does not
+    # exist in Python 2.6.
+    from io import UnsupportedOperation  # noqa
+except ImportError:
+    # Python 2.6
+    UnsupportedOperation = None
+
+import eventlet
+from eventlet import event
+from oslo_config import cfg
+
+from keystone.openstack.common import eventlet_backdoor
+from keystone.openstack.common._i18n import _LE, _LI, _LW
+from keystone.openstack.common import systemd
+from keystone.openstack.common import threadgroup
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def _sighup_supported():
+    return hasattr(signal, 'SIGHUP')
+
+
+def _is_daemon():
+    # The process group for a foreground process will match the
+    # process group of the controlling terminal. If those values do
+    # not match, or ioctl() fails on the stdout file handle, we assume
+    # the process is running in the background as a daemon.
+    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
+    try:
+        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
+    except OSError as err:
+        if err.errno == errno.ENOTTY:
+            # Assume we are a daemon because there is no terminal.
+            is_daemon = True
+        else:
+            raise
+    except UnsupportedOperation:
+        # Could not get the fileno for stdout, so we must be a daemon.
+        is_daemon = True
+    return is_daemon
+
+
+def _is_sighup_and_daemon(signo):
+    if not (_sighup_supported() and signo == signal.SIGHUP):
+        # Avoid checking if we are a daemon, because the signal isn't
+        # SIGHUP.
+        return False
+    return _is_daemon()
+
+
+def _signo_to_signame(signo):
+    signals = {signal.SIGTERM: 'SIGTERM',
+               signal.SIGINT: 'SIGINT'}
+    if _sighup_supported():
+        signals[signal.SIGHUP] = 'SIGHUP'
+    return signals[signo]
+
+
+def _set_signals_handler(handler):
+    signal.signal(signal.SIGTERM, handler)
+    signal.signal(signal.SIGINT, handler)
+    if _sighup_supported():
+        signal.signal(signal.SIGHUP, handler)
+
+
+class Launcher(object):
+    """Launch one or more services and wait for them to complete."""
+
+    def __init__(self):
+        """Initialize the service launcher.
+
+        :returns: None
+
+        """
+        self.services = Services()
+        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
+
+    def launch_service(self, service):
+        """Load and start the given service.
+
+        :param service: The service you would like to start.
+        :returns: None
+
+        """
+        service.backdoor_port = self.backdoor_port
+        self.services.add(service)
+
+    def stop(self):
+        """Stop all services which are currently running.
+
+        :returns: None
+
+        """
+        self.services.stop()
+
+    def wait(self):
+        """Waits until all services have been stopped, and then returns.
+
+        :returns: None
+
+        """
+        self.services.wait()
+
+    def restart(self):
+        """Reload config files and restart service.
+
+        :returns: None
+
+        """
+        cfg.CONF.reload_config_files()
+        self.services.restart()
+
+
+class SignalExit(SystemExit):
+    def __init__(self, signo, exccode=1):
+        super(SignalExit, self).__init__(exccode)
+        self.signo = signo
+
+
+class ServiceLauncher(Launcher):
+    def _handle_signal(self, signo, frame):
+        # Allow the process to be killed again and die from natural causes
+        _set_signals_handler(signal.SIG_DFL)
+        raise SignalExit(signo)
+
+    def handle_signal(self):
+        _set_signals_handler(self._handle_signal)
+
+    def _wait_for_exit_or_signal(self, ready_callback=None):
+        status = None
+        signo = 0
+
+        LOG.debug('Full set of CONF:')
+        CONF.log_opt_values(LOG, logging.DEBUG)
+
+        try:
+            if ready_callback:
+                ready_callback()
+            super(ServiceLauncher, self).wait()
+        except SignalExit as exc:
+            signame = _signo_to_signame(exc.signo)
+            LOG.info(_LI('Caught %s, exiting'), signame)
+            status = exc.code
+            signo = exc.signo
+        except SystemExit as exc:
+            status = exc.code
+        finally:
+            self.stop()
+
+        return status, signo
+
+    def wait(self, ready_callback=None):
+        systemd.notify_once()
+        while True:
+            self.handle_signal()
+            status, signo = self._wait_for_exit_or_signal(ready_callback)
+            if not _is_sighup_and_daemon(signo):
+                return status
+            self.restart()
+
+
+class ServiceWrapper(object):
+    def __init__(self, service, workers):
+        self.service = service
+        self.workers = workers
+        self.children = set()
+        self.forktimes = []
+
+
+class ProcessLauncher(object):
+    def __init__(self):
+        """Constructor."""
+
+        self.children = {}
+        self.sigcaught = None
+        self.running = True
+        rfd, self.writepipe = os.pipe()
+        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
+        self.handle_signal()
+
+    def handle_signal(self):
+        _set_signals_handler(self._handle_signal)
+
+    def _handle_signal(self, signo, frame):
+        self.sigcaught = signo
+        self.running = False
+
+        # Allow the process to be killed again and die from natural causes
+        _set_signals_handler(signal.SIG_DFL)
+
+    def _pipe_watcher(self):
+        # This will block until the write end is closed when the parent
+        # dies unexpectedly
+        self.readpipe.read()
+
+        LOG.info(_LI('Parent process has died unexpectedly, exiting'))
+
+        sys.exit(1)
+
+    def _child_process_handle_signal(self):
+        # Setup child signal handlers differently
+        def _sigterm(*args):
+            signal.signal(signal.SIGTERM, signal.SIG_DFL)
+            raise SignalExit(signal.SIGTERM)
+
+        def _sighup(*args):
+            signal.signal(signal.SIGHUP, signal.SIG_DFL)
+            raise SignalExit(signal.SIGHUP)
+
+        signal.signal(signal.SIGTERM, _sigterm)
+        if _sighup_supported():
+            signal.signal(signal.SIGHUP, _sighup)
+        # Block SIGINT and let the parent send us a SIGTERM
+        signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+    def _child_wait_for_exit_or_signal(self, launcher):
+        status = 0
+        signo = 0
+
+        # NOTE(johannes): All exceptions are caught to ensure this
+        # doesn't fall back into the loop spawning children. It would
+        # be bad for a child to spawn more children.
+        try:
+            launcher.wait()
+        except SignalExit as exc:
+            signame = _signo_to_signame(exc.signo)
+            LOG.info(_LI('Child caught %s, exiting'), signame)
+            status = exc.code
+            signo = exc.signo
+        except SystemExit as exc:
+            status = exc.code
+        except BaseException:
+            LOG.exception(_LE('Unhandled exception'))
+            status = 2
+        finally:
+            launcher.stop()
+
+        return status, signo
+
+    def _child_process(self, service):
+        self._child_process_handle_signal()
+
+        # Reopen the eventlet hub to make sure we don't share an epoll
+        # fd with parent and/or siblings, which would be bad
+        eventlet.hubs.use_hub()
+
+        # Close write to ensure only parent has it open
+        os.close(self.writepipe)
+        # Create greenthread to watch for parent to close pipe
+        eventlet.spawn_n(self._pipe_watcher)
+
+        # Reseed random number generator
+        random.seed()
+
+        launcher = Launcher()
+        launcher.launch_service(service)
+        return launcher
+
+    def _start_child(self, wrap):
+        if len(wrap.forktimes) > wrap.workers:
+            # Limit ourselves to one fork per second (measured over a
+            # window of workers * 1 second). This allows workers to
+            # start up quickly while ensuring we don't continuously
+            # respawn children that die immediately.
+            if time.time() - wrap.forktimes[0] < wrap.workers:
+                LOG.info(_LI('Forking too fast, sleeping'))
+                time.sleep(1)
+
+            wrap.forktimes.pop(0)
+
+        wrap.forktimes.append(time.time())
+
+        pid = os.fork()
+        if pid == 0:
+            launcher = self._child_process(wrap.service)
+            while True:
+                self._child_process_handle_signal()
+                status, signo = self._child_wait_for_exit_or_signal(launcher)
+                if not _is_sighup_and_daemon(signo):
+                    break
+                launcher.restart()
+
+            os._exit(status)
+
+        LOG.info(_LI('Started child %d'), pid)
+
+        wrap.children.add(pid)
+        self.children[pid] = wrap
+
+        return pid
+
+    def launch_service(self, service, workers=1):
+        wrap = ServiceWrapper(service, workers)
+
+        LOG.info(_LI('Starting %d workers'), wrap.workers)
+        while self.running and len(wrap.children) < wrap.workers:
+            self._start_child(wrap)
+
+    def _wait_child(self):
+        try:
+            # Block until any child process exits
+            pid, status = os.waitpid(0, 0)
+            if not pid:
+                return None
+        except OSError as exc:
+            if exc.errno not in (errno.EINTR, errno.ECHILD):
+                raise
+            return None
+
+        if os.WIFSIGNALED(status):
+            sig = os.WTERMSIG(status)
+            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
+                     dict(pid=pid, sig=sig))
+        else:
+            code = os.WEXITSTATUS(status)
+            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
+                     dict(pid=pid, code=code))
+
+        if pid not in self.children:
+            LOG.warning(_LW('pid %d not in child list'), pid)
+            return None
+
+        wrap = self.children.pop(pid)
+        wrap.children.remove(pid)
+        return wrap
+
+    def _respawn_children(self):
+        while self.running:
+            wrap = self._wait_child()
+            if not wrap:
+                continue
+            while self.running and len(wrap.children) < wrap.workers:
+                self._start_child(wrap)
+
+    def wait(self):
+        """Loop waiting on children to die and respawning as necessary."""
+
+        systemd.notify_once()
+        LOG.debug('Full set of CONF:')
+        CONF.log_opt_values(LOG, logging.DEBUG)
+
+        try:
+            while True:
+                self.handle_signal()
+                self._respawn_children()
+                # No signal means that stop was called.  Don't clean up here.
+                if not self.sigcaught:
+                    return
+
+                signame = _signo_to_signame(self.sigcaught)
+                LOG.info(_LI('Caught %s, stopping children'), signame)
+                if not _is_sighup_and_daemon(self.sigcaught):
+                    break
+
+                for pid in self.children:
+                    os.kill(pid, signal.SIGHUP)
+                self.running = True
+                self.sigcaught = None
+        except eventlet.greenlet.GreenletExit:
+            LOG.info(_LI("Wait called after thread killed. Cleaning up."))
+
+        self.stop()
+
+    def stop(self):
+        """Terminate child processes and wait on each."""
+        self.running = False
+        for pid in self.children:
+            try:
+                os.kill(pid, signal.SIGTERM)
+            except OSError as exc:
+                if exc.errno != errno.ESRCH:
+                    raise
+
+        # Wait for children to die
+        if self.children:
+            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
+            while self.children:
+                self._wait_child()
+
+
+class Service(object):
+    """Service object for binaries running on hosts."""
+
+    def __init__(self, threads=1000):
+        self.tg = threadgroup.ThreadGroup(threads)
+
+        # signal that the service is done shutting itself down:
+        self._done = event.Event()
+
+    def reset(self):
+        # NOTE(Fengqian): docs for Event.reset() recommend against using it
+        self._done = event.Event()
+
+    def start(self):
+        pass
+
+    def stop(self, graceful=False):
+        self.tg.stop(graceful)
+        self.tg.wait()
+        # Signal that service cleanup is done:
+        if not self._done.ready():
+            self._done.send()
+
+    def wait(self):
+        self._done.wait()
+
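+# Subclass sketch (illustrative; ``HeartbeatService`` and ``_beat`` are
+# hypothetical):
+#
+#     class HeartbeatService(Service):
+#         def start(self):
+#             # run _beat() every 30 seconds on the service thread group
+#             self.tg.add_timer(30, self._beat)
+#
+#         def _beat(self):
+#             LOG.debug('heartbeat')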
+
+class Services(object):
+
+    def __init__(self):
+        self.services = []
+        self.tg = threadgroup.ThreadGroup()
+        self.done = event.Event()
+
+    def add(self, service):
+        self.services.append(service)
+        self.tg.add_thread(self.run_service, service, self.done)
+
+    def stop(self):
+        # wait for graceful shutdown of services:
+        for service in self.services:
+            service.stop()
+            service.wait()
+
+        # Each service has performed cleanup, now signal that the run_service
+        # wrapper threads can now die:
+        if not self.done.ready():
+            self.done.send()
+
+        # reap threads:
+        self.tg.stop()
+
+    def wait(self):
+        self.tg.wait()
+
+    def restart(self):
+        self.stop()
+        self.done = event.Event()
+        for restart_service in self.services:
+            restart_service.reset()
+            self.tg.add_thread(self.run_service, restart_service, self.done)
+
+    @staticmethod
+    def run_service(service, done):
+        """Service start wrapper.
+
+        :param service: service to run
+        :param done: event to wait on until a shutdown is triggered
+        :returns: None
+
+        """
+        service.start()
+        done.wait()
+
+
+def launch(service, workers=1):
+    if workers is None or workers == 1:
+        launcher = ServiceLauncher()
+        launcher.launch_service(service)
+    else:
+        launcher = ProcessLauncher()
+        launcher.launch_service(service, workers=workers)
+
+    return launcher
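+
+# Usage sketch (illustrative; ``MyService`` is a hypothetical Service
+# subclass):
+#
+#     launcher = launch(MyService(), workers=4)  # selects ProcessLauncher
+#     launcher.wait()  # respawns children until stopped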
diff --git a/keystone-moon/keystone/openstack/common/systemd.py b/keystone-moon/keystone/openstack/common/systemd.py
new file mode 100644 (file)
index 0000000..36243b3
--- /dev/null
@@ -0,0 +1,105 @@
+# Copyright 2012-2014 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Helper module for systemd service readiness notification.
+"""
+
+import logging
+import os
+import socket
+import sys
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _abstractify(socket_name):
+    if socket_name.startswith('@'):
+        # abstract namespace socket
+        socket_name = '\0%s' % socket_name[1:]
+    return socket_name
+
+
+def _sd_notify(unset_env, msg):
+    notify_socket = os.getenv('NOTIFY_SOCKET')
+    if notify_socket:
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+        try:
+            sock.connect(_abstractify(notify_socket))
+            sock.sendall(msg)
+            if unset_env:
+                del os.environ['NOTIFY_SOCKET']
+        except EnvironmentError:
+            LOG.debug("Systemd notification failed", exc_info=True)
+        finally:
+            sock.close()
+
+
+def notify():
+    """Send notification to Systemd that service is ready.
+
+    For details see
+    http://www.freedesktop.org/software/systemd/man/sd_notify.html
+    """
+    _sd_notify(False, 'READY=1')
+
+
+def notify_once():
+    """Send notification once to Systemd that service is ready.
+
+    Systemd sets the NOTIFY_SOCKET environment variable with the name of
+    the socket listening for notifications from services.
+    This method removes the NOTIFY_SOCKET environment variable to ensure
+    notification is sent only once.
+    """
+    _sd_notify(True, 'READY=1')
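+
+# Usage sketch: notify_once() pairs with ``Type=notify`` in the systemd
+# unit file, so systemd waits for READY=1 before marking the service
+# active (the unit values below are illustrative):
+#
+#     [Service]
+#     Type=notify
+#     ExecStart=/usr/bin/keystone-all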
+
+
+def onready(notify_socket, timeout):
+    """Wait for systemd style notification on the socket.
+
+    :param notify_socket: local socket address
+    :type notify_socket:  string
+    :param timeout:       socket timeout
+    :type timeout:        float
+    :returns:             0 service ready
+                          1 service not ready
+                          2 timeout occurred
+    """
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+    sock.settimeout(timeout)
+    sock.bind(_abstractify(notify_socket))
+    try:
+        msg = sock.recv(512)
+    except socket.timeout:
+        return 2
+    finally:
+        sock.close()
+    if 'READY=1' in msg:
+        return 0
+    else:
+        return 1
+
+
+if __name__ == '__main__':
+    # simple CLI for testing
+    if len(sys.argv) == 1:
+        notify()
+    elif len(sys.argv) >= 2:
+        timeout = float(sys.argv[1])
+        notify_socket = os.getenv('NOTIFY_SOCKET')
+        if notify_socket:
+            retval = onready(notify_socket, timeout)
+            sys.exit(retval)
diff --git a/keystone-moon/keystone/openstack/common/threadgroup.py b/keystone-moon/keystone/openstack/common/threadgroup.py
new file mode 100644 (file)
index 0000000..fc0bcb5
--- /dev/null
@@ -0,0 +1,149 @@
+# Copyright 2012 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import logging
+import threading
+
+import eventlet
+from eventlet import greenpool
+
+from keystone.openstack.common import loopingcall
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _thread_done(gt, *args, **kwargs):
+    """Callback function to be passed to GreenThread.link() when we spawn()
+    Calls the :class:`ThreadGroup` to notify if.
+
+    """
+    kwargs['group'].thread_done(kwargs['thread'])
+
+
+class Thread(object):
+    """Wrapper around a greenthread, that holds a reference to the
+    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
+    it has done so it can be removed from the threads list.
+    """
+    def __init__(self, thread, group):
+        self.thread = thread
+        self.thread.link(_thread_done, group=group, thread=self)
+
+    def stop(self):
+        self.thread.kill()
+
+    def wait(self):
+        return self.thread.wait()
+
+    def link(self, func, *args, **kwargs):
+        self.thread.link(func, *args, **kwargs)
+
+
+class ThreadGroup(object):
+    """The point of the ThreadGroup class is to:
+
+    * keep track of timers and greenthreads (making it easier to stop them
+      when need be).
+    * provide an easy API to add timers.
+    """
+    def __init__(self, thread_pool_size=10):
+        self.pool = greenpool.GreenPool(thread_pool_size)
+        self.threads = []
+        self.timers = []
+
+    def add_dynamic_timer(self, callback, initial_delay=None,
+                          periodic_interval_max=None, *args, **kwargs):
+        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
+        timer.start(initial_delay=initial_delay,
+                    periodic_interval_max=periodic_interval_max)
+        self.timers.append(timer)
+
+    def add_timer(self, interval, callback, initial_delay=None,
+                  *args, **kwargs):
+        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
+        pulse.start(interval=interval,
+                    initial_delay=initial_delay)
+        self.timers.append(pulse)
+
+    def add_thread(self, callback, *args, **kwargs):
+        gt = self.pool.spawn(callback, *args, **kwargs)
+        th = Thread(gt, self)
+        self.threads.append(th)
+        return th
+
+    def thread_done(self, thread):
+        self.threads.remove(thread)
+
+    def _stop_threads(self):
+        current = threading.current_thread()
+
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
+            if x is current:
+                # don't kill the current thread.
+                continue
+            try:
+                x.stop()
+            except eventlet.greenlet.GreenletExit:
+                pass
+            except Exception as ex:
+                LOG.exception(ex)
+
+    def stop_timers(self):
+        for x in self.timers:
+            try:
+                x.stop()
+            except Exception as ex:
+                LOG.exception(ex)
+        self.timers = []
+
+    def stop(self, graceful=False):
+        """stop function has the option of graceful=True/False.
+
+        * In case of graceful=True, wait for all threads to be finished.
+          Never kill threads.
+        * In case of graceful=False, kill threads immediately.
+        """
+        self.stop_timers()
+        if graceful:
+            # In case of graceful=True, wait for all threads to be
+            # finished, never kill threads
+            self.wait()
+        else:
+            # In case of graceful=False(Default), kill threads
+            # immediately
+            self._stop_threads()
+
+    def wait(self):
+        for x in self.timers:
+            try:
+                x.wait()
+            except eventlet.greenlet.GreenletExit:
+                pass
+            except Exception as ex:
+                LOG.exception(ex)
+        current = threading.current_thread()
+
+        # Iterate over a copy of self.threads so thread_done doesn't
+        # modify the list while we're iterating
+        for x in self.threads[:]:
+            if x is current:
+                continue
+            try:
+                x.wait()
+            except eventlet.greenlet.GreenletExit:
+                pass
+            except Exception as ex:
+                LOG.exception(ex)
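+
+# Usage sketch (illustrative; ``worker`` and ``heartbeat`` are
+# hypothetical callables):
+#
+#     tg = ThreadGroup(thread_pool_size=10)
+#     tg.add_thread(worker, task_id)  # spawn a greenthread from the pool
+#     tg.add_timer(60, heartbeat)     # fixed 60-second interval timer
+#     tg.stop(graceful=True)          # wait for threads instead of killing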
diff --git a/keystone-moon/keystone/openstack/common/versionutils.py b/keystone-moon/keystone/openstack/common/versionutils.py
new file mode 100644 (file)
index 0000000..111bfd6
--- /dev/null
@@ -0,0 +1,262 @@
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Helpers for comparing version strings.
+"""
+
+import copy
+import functools
+import inspect
+import logging
+
+from oslo_config import cfg
+import pkg_resources
+import six
+
+from keystone.openstack.common._i18n import _
+
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+deprecated_opts = [
+    cfg.BoolOpt('fatal_deprecations',
+                default=False,
+                help='Enables or disables fatal status of deprecations.'),
+]
+
+
+def list_opts():
+    """Entry point for oslo.config-generator.
+    """
+    return [(None, copy.deepcopy(deprecated_opts))]
+
+
+class deprecated(object):
+    """A decorator to mark callables as deprecated.
+
+    This decorator logs a deprecation message when the callable it decorates is
+    used. The message will include the release where the callable was
+    deprecated, the release where it may be removed and possibly an optional
+    replacement.
+
+    Examples:
+
+    1. Specifying the required deprecated release
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE)
+    ... def a(): pass
+
+    2. Specifying a replacement:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
+    ... def b(): pass
+
+    3. Specifying the release where the functionality may be removed:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
+    ... def c(): pass
+
+    4. Specifying the deprecated functionality will not be removed:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
+    ... def d(): pass
+
+    5. Specifying a replacement; deprecated functionality will not be removed:
+
+    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
+    ... def e(): pass
+
+    """
+
+    # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
+    # expected we maintain a gap between Bexar and Folsom in this list.
+    BEXAR = 'B'
+    FOLSOM = 'F'
+    GRIZZLY = 'G'
+    HAVANA = 'H'
+    ICEHOUSE = 'I'
+    JUNO = 'J'
+    KILO = 'K'
+    LIBERTY = 'L'
+
+    _RELEASES = {
+        # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
+        # expected we maintain a gap between Bexar and Folsom in this list.
+        'B': 'Bexar',
+        'F': 'Folsom',
+        'G': 'Grizzly',
+        'H': 'Havana',
+        'I': 'Icehouse',
+        'J': 'Juno',
+        'K': 'Kilo',
+        'L': 'Liberty',
+    }
+
+    _deprecated_msg_with_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s in favor of '
+        '%(in_favor_of)s and may be removed in %(remove_in)s.')
+
+    _deprecated_msg_no_alternative = _(
+        '%(what)s is deprecated as of %(as_of)s and may be '
+        'removed in %(remove_in)s. It will not be superseded.')
+
+    _deprecated_msg_with_alternative_no_removal = _(
+        '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
+
+    _deprecated_msg_with_no_alternative_no_removal = _(
+        '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
+
+    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+        """Initialize decorator
+
+        :param as_of: the release deprecating the callable. Constants
+            are defined in this class for convenience.
+        :param in_favor_of: the replacement for the callable (optional)
+        :param remove_in: an integer specifying how many releases to wait
+            before removing (default: 2)
+        :param what: name of the thing being deprecated (default: the
+            callable's name)
+
+        """
+        self.as_of = as_of
+        self.in_favor_of = in_favor_of
+        self.remove_in = remove_in
+        self.what = what
+
+    def __call__(self, func_or_cls):
+        if not self.what:
+            self.what = func_or_cls.__name__ + '()'
+        msg, details = self._build_message()
+
+        if inspect.isfunction(func_or_cls):
+
+            @six.wraps(func_or_cls)
+            def wrapped(*args, **kwargs):
+                report_deprecated_feature(LOG, msg, details)
+                return func_or_cls(*args, **kwargs)
+            return wrapped
+        elif inspect.isclass(func_or_cls):
+            orig_init = func_or_cls.__init__
+
+            # TODO(tsufiev): change `functools` module to `six` as
+            # soon as six 1.7.4 (with fix for passing `assigned`
+            # argument to underlying `functools.wraps`) is released
+            # and added to the oslo-incubator requirements
+            @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
+            def new_init(self, *args, **kwargs):
+                report_deprecated_feature(LOG, msg, details)
+                orig_init(self, *args, **kwargs)
+            func_or_cls.__init__ = new_init
+            return func_or_cls
+        else:
+            raise TypeError('deprecated can be used only with functions or '
+                            'classes')
+
+    def _get_safe_to_remove_release(self, release):
+        # TODO(dstanek): this method will have to be reimplemented once
+        #    when we get to the X release because once we get to the Y
+        #    release, what is Y+2?
+        new_release = chr(ord(release) + self.remove_in)
+        if new_release in self._RELEASES:
+            return self._RELEASES[new_release]
+        else:
+            return new_release
+
+    def _build_message(self):
+        details = dict(what=self.what,
+                       as_of=self._RELEASES[self.as_of],
+                       remove_in=self._get_safe_to_remove_release(self.as_of))
+
+        if self.in_favor_of:
+            details['in_favor_of'] = self.in_favor_of
+            if self.remove_in > 0:
+                msg = self._deprecated_msg_with_alternative
+            else:
+                # There are no plans to remove this function, but it is
+                # now deprecated.
+                msg = self._deprecated_msg_with_alternative_no_removal
+        else:
+            if self.remove_in > 0:
+                msg = self._deprecated_msg_no_alternative
+            else:
+                # There are no plans to remove this function, but it is
+                # now deprecated.
+                msg = self._deprecated_msg_with_no_alternative_no_removal
+        return msg, details
+
+
+def is_compatible(requested_version, current_version, same_major=True):
+    """Determine whether `requested_version` is satisfied by
+    `current_version`; in other words, `current_version` is >=
+    `requested_version`.
+
+    :param requested_version: version to check for compatibility
+    :param current_version: version to check against
+    :param same_major: if True, the major version must be identical between
+        `requested_version` and `current_version`. This is used when a
+        major-version difference indicates incompatibility between the two
+        versions. Since this is the common-case in practice, the default is
+        True.
+    :returns: True if compatible, False if not
+    """
+    requested_parts = pkg_resources.parse_version(requested_version)
+    current_parts = pkg_resources.parse_version(current_version)
+
+    if same_major and (requested_parts[0] != current_parts[0]):
+        return False
+
+    return current_parts >= requested_parts
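+
+# Illustrative behavior sketch (example values, not from a test suite):
+#
+#     is_compatible('1.1', '1.2')                    # True
+#     is_compatible('1.1', '2.0')                    # False: major differs
+#     is_compatible('1.1', '2.0', same_major=False)  # True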
+
+
+# Track the messages we have sent already. See
+# report_deprecated_feature().
+_deprecated_messages_sent = {}
+
+
+def report_deprecated_feature(logger, msg, *args, **kwargs):
+    """Call this function when a deprecated feature is used.
+
+    If the system is configured for fatal deprecations then the message
+    is logged at the 'critical' level and :class:`DeprecatedConfig` will
+    be raised.
+
+    Otherwise, the message will be logged (once) at the 'warn' level.
+
+    :raises: :class:`DeprecatedConfig` if the system is configured for
+             fatal deprecations.
+    """
+    stdmsg = _("Deprecated: %s") % msg
+    CONF.register_opts(deprecated_opts)
+    if CONF.fatal_deprecations:
+        logger.critical(stdmsg, *args, **kwargs)
+        raise DeprecatedConfig(msg=stdmsg)
+
+    # Using a list because a tuple with dict can't be stored in a set.
+    sent_args = _deprecated_messages_sent.setdefault(msg, list())
+
+    if args in sent_args:
+        # Already logged this message, so don't log it again.
+        return
+
+    sent_args.append(args)
+    logger.warn(stdmsg, *args, **kwargs)
+
+
+class DeprecatedConfig(Exception):
+    message = _("Fatal call to deprecated config: %(msg)s")
+
+    def __init__(self, msg):
+        super(Exception, self).__init__(self.message % dict(msg=msg))
diff --git a/keystone-moon/keystone/policy/__init__.py b/keystone-moon/keystone/policy/__init__.py
new file mode 100644 (file)
index 0000000..4cd9679
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.policy import controllers  # noqa
+from keystone.policy.core import *  # noqa
+from keystone.policy import routers  # noqa
diff --git a/keystone-moon/keystone/policy/backends/__init__.py b/keystone-moon/keystone/policy/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/policy/backends/rules.py b/keystone-moon/keystone/policy/backends/rules.py
new file mode 100644 (file)
index 0000000..011dd54
--- /dev/null
@@ -0,0 +1,92 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Policy engine for keystone"""
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_policy import policy as common_policy
+
+from keystone import exception
+from keystone import policy
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+_ENFORCER = None
+
+
+def reset():
+    global _ENFORCER
+    _ENFORCER = None
+
+
+def init():
+    global _ENFORCER
+    if not _ENFORCER:
+        _ENFORCER = common_policy.Enforcer(CONF)
+
+
+def enforce(credentials, action, target, do_raise=True):
+    """Verifies that the action is valid on the target in this context.
+
+       :param credentials: user credentials
+       :param action: string representing the action to be checked, which
+                      should be colon separated for clarity.
+       :param target: dictionary representing the object of the action
+                      for object creation this should be a dictionary
+                      representing the location of the object e.g.
+                      {'project_id': object.project_id}
+       :raises: `exception.Forbidden` if verification fails.
+
+       Actions should be colon separated for clarity. For example:
+
+        * identity:list_users
+
+    """
+    init()
+
+    # Add the exception arguments if asked to do a raise
+    extra = {}
+    if do_raise:
+        extra.update(exc=exception.ForbiddenAction, action=action,
+                     do_raise=do_raise)
+
+    return _ENFORCER.enforce(action, target, credentials, **extra)
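+
+# Usage sketch (illustrative; credential and target values are made up):
+#
+#     enforce({'roles': ['admin']}, 'identity:list_users',
+#             {'domain_id': 'default'})
+#
+# With do_raise left at True, a failed check raises
+# exception.ForbiddenAction instead of returning False.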
+
+
+class Policy(policy.Driver):
+    def enforce(self, credentials, action, target):
+        LOG.debug('enforce %(action)s: %(credentials)s', {
+            'action': action,
+            'credentials': credentials})
+        enforce(credentials, action, target)
+
+    def create_policy(self, policy_id, policy):
+        raise exception.NotImplemented()
+
+    def list_policies(self):
+        raise exception.NotImplemented()
+
+    def get_policy(self, policy_id):
+        raise exception.NotImplemented()
+
+    def update_policy(self, policy_id, policy):
+        raise exception.NotImplemented()
+
+    def delete_policy(self, policy_id):
+        raise exception.NotImplemented()
diff --git a/keystone-moon/keystone/policy/backends/sql.py b/keystone-moon/keystone/policy/backends/sql.py
new file mode 100644 (file)
index 0000000..b2cccd0
--- /dev/null
@@ -0,0 +1,79 @@
+# Copyright 2012 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import sql
+from keystone import exception
+from keystone.policy.backends import rules
+
+
+class PolicyModel(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'policy'
+    attributes = ['id', 'blob', 'type']
+    id = sql.Column(sql.String(64), primary_key=True)
+    blob = sql.Column(sql.JsonBlob(), nullable=False)
+    type = sql.Column(sql.String(255), nullable=False)
+    extra = sql.Column(sql.JsonBlob())
+
+
+class Policy(rules.Policy):
+
+    @sql.handle_conflicts(conflict_type='policy')
+    def create_policy(self, policy_id, policy):
+        session = sql.get_session()
+
+        with session.begin():
+            ref = PolicyModel.from_dict(policy)
+            session.add(ref)
+
+        return ref.to_dict()
+
+    def list_policies(self):
+        session = sql.get_session()
+
+        refs = session.query(PolicyModel).all()
+        return [ref.to_dict() for ref in refs]
+
+    def _get_policy(self, session, policy_id):
+        """Private method to get a policy model object (NOT a dictionary)."""
+        ref = session.query(PolicyModel).get(policy_id)
+        if not ref:
+            raise exception.PolicyNotFound(policy_id=policy_id)
+        return ref
+
+    def get_policy(self, policy_id):
+        session = sql.get_session()
+
+        return self._get_policy(session, policy_id).to_dict()
+
+    @sql.handle_conflicts(conflict_type='policy')
+    def update_policy(self, policy_id, policy):
+        session = sql.get_session()
+
+        with session.begin():
+            ref = self._get_policy(session, policy_id)
+            old_dict = ref.to_dict()
+            old_dict.update(policy)
+            new_policy = PolicyModel.from_dict(old_dict)
+            ref.blob = new_policy.blob
+            ref.type = new_policy.type
+            ref.extra = new_policy.extra
+
+        return ref.to_dict()
+
+    def delete_policy(self, policy_id):
+        session = sql.get_session()
+
+        with session.begin():
+            ref = self._get_policy(session, policy_id)
+            session.delete(ref)
diff --git a/keystone-moon/keystone/policy/controllers.py b/keystone-moon/keystone/policy/controllers.py
new file mode 100644 (file)
index 0000000..e6eb9bc
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import validation
+from keystone import notifications
+from keystone.policy import schema
+
+
+@dependency.requires('policy_api')
+class PolicyV3(controller.V3Controller):
+    collection_name = 'policies'
+    member_name = 'policy'
+
+    @controller.protected()
+    @validation.validated(schema.policy_create, 'policy')
+    def create_policy(self, context, policy):
+        ref = self._assign_unique_id(self._normalize_dict(policy))
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.policy_api.create_policy(ref['id'], ref, initiator)
+        return PolicyV3.wrap_member(context, ref)
+
+    @controller.filterprotected('type')
+    def list_policies(self, context, filters):
+        hints = PolicyV3.build_driver_hints(context, filters)
+        refs = self.policy_api.list_policies(hints=hints)
+        return PolicyV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.protected()
+    def get_policy(self, context, policy_id):
+        ref = self.policy_api.get_policy(policy_id)
+        return PolicyV3.wrap_member(context, ref)
+
+    @controller.protected()
+    @validation.validated(schema.policy_update, 'policy')
+    def update_policy(self, context, policy_id, policy):
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.policy_api.update_policy(policy_id, policy, initiator)
+        return PolicyV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_policy(self, context, policy_id):
+        initiator = notifications._get_request_audit_info(context)
+        return self.policy_api.delete_policy(policy_id, initiator)
diff --git a/keystone-moon/keystone/policy/core.py b/keystone-moon/keystone/policy/core.py
new file mode 100644 (file)
index 0000000..1f02803
--- /dev/null
@@ -0,0 +1,135 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Policy service."""
+
+import abc
+
+from oslo_config import cfg
+import six
+
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone import notifications
+
+
+CONF = cfg.CONF
+
+
+@dependency.provider('policy_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Policy backend.
+
+    See :class:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+    _POLICY = 'policy'
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.policy.driver)
+
+    def create_policy(self, policy_id, policy, initiator=None):
+        ref = self.driver.create_policy(policy_id, policy)
+        notifications.Audit.created(self._POLICY, policy_id, initiator)
+        return ref
+
+    def get_policy(self, policy_id):
+        try:
+            return self.driver.get_policy(policy_id)
+        except exception.NotFound:
+            raise exception.PolicyNotFound(policy_id=policy_id)
+
+    def update_policy(self, policy_id, policy, initiator=None):
+        if 'id' in policy and policy_id != policy['id']:
+            raise exception.ValidationError('Cannot change policy ID')
+        try:
+            ref = self.driver.update_policy(policy_id, policy)
+        except exception.NotFound:
+            raise exception.PolicyNotFound(policy_id=policy_id)
+        notifications.Audit.updated(self._POLICY, policy_id, initiator)
+        return ref
+
+    @manager.response_truncated
+    def list_policies(self, hints=None):
+        # NOTE(henry-nash): Since the advantage of filtering or list limiting
+        # of policies at the driver level is minimal, we leave this to the
+        # caller.
+        return self.driver.list_policies()
+
+    def delete_policy(self, policy_id, initiator=None):
+        try:
+            ret = self.driver.delete_policy(policy_id)
+        except exception.NotFound:
+            raise exception.PolicyNotFound(policy_id=policy_id)
+        notifications.Audit.deleted(self._POLICY, policy_id, initiator)
+        return ret
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+
+    def _get_list_limit(self):
+        return CONF.policy.list_limit or CONF.list_limit
+
+    @abc.abstractmethod
+    def enforce(self, context, credentials, action, target):
+        """Verify that a user is authorized to perform action.
+
+        For more information on a full implementation of this see:
+        `keystone.policy.backends.rules.Policy.enforce`
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_policy(self, policy_id, policy):
+        """Store a policy blob.
+
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_policies(self):
+        """List all policies."""
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_policy(self, policy_id):
+        """Retrieve a specific policy blob.
+
+        :raises: keystone.exception.PolicyNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_policy(self, policy_id, policy):
+        """Update a policy blob.
+
+        :raises: keystone.exception.PolicyNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_policy(self, policy_id):
+        """Remove a policy blob.
+
+        :raises: keystone.exception.PolicyNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
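+
+# Minimal backend sketch (illustrative; not part of keystone). A concrete
+# driver implements every abstract method above, e.g.:
+#
+#     class InMemoryPolicy(Driver):
+#         def __init__(self):
+#             self._store = {}
+#
+#         def create_policy(self, policy_id, policy):
+#             self._store[policy_id] = policy
+#             return policy
+#
+#         # enforce, list_policies, get_policy, update_policy and
+#         # delete_policy would be implemented along the same lines.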
diff --git a/keystone-moon/keystone/policy/routers.py b/keystone-moon/keystone/policy/routers.py
new file mode 100644 (file)
index 0000000..5daadc8
--- /dev/null
@@ -0,0 +1,24 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from keystone.common import router
+from keystone.common import wsgi
+from keystone.policy import controllers
+
+
+class Routers(wsgi.RoutersBase):
+
+    def append_v3_routers(self, mapper, routers):
+        policy_controller = controllers.PolicyV3()
+        routers.append(router.Router(policy_controller, 'policies', 'policy',
+                                     resource_descriptions=self.v3_resources))
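+
+# The standard CRUD router above exposes the PolicyV3 controller at
+# /v3/policies; an illustrative create request (endpoint, token and
+# payload values are examples):
+#
+#     curl -X POST http://localhost:35357/v3/policies \
+#         -H "X-Auth-Token: $TOKEN" \
+#         -H "Content-Type: application/json" \
+#         -d '{"policy": {"blob": "{}", "type": "application/json"}}'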
diff --git a/keystone-moon/keystone/policy/schema.py b/keystone-moon/keystone/policy/schema.py
new file mode 100644 (file)
index 0000000..512c4ce
--- /dev/null
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+_policy_properties = {
+    'blob': {
+        'type': 'string'
+    },
+    'type': {
+        'type': 'string',
+        'maxLength': 255
+    }
+}
+
+policy_create = {
+    'type': 'object',
+    'properties': _policy_properties,
+    'required': ['blob', 'type'],
+    'additionalProperties': True
+}
+
+policy_update = {
+    'type': 'object',
+    'properties': _policy_properties,
+    'minProperties': 1,
+    'additionalProperties': True
+}
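+
+# An example document (illustrative) that validates against
+# ``policy_create``; the blob is typically a serialized policy file:
+#
+#     {
+#         "blob": "{\"identity:list_users\": \"role:admin\"}",
+#         "type": "application/json"
+#     }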
diff --git a/keystone-moon/keystone/resource/__init__.py b/keystone-moon/keystone/resource/__init__.py
new file mode 100644 (file)
index 0000000..c0070a1
--- /dev/null
@@ -0,0 +1,15 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.resource import controllers  # noqa
+from keystone.resource.core import *  # noqa
+from keystone.resource import routers  # noqa
diff --git a/keystone-moon/keystone/resource/backends/__init__.py b/keystone-moon/keystone/resource/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/resource/backends/ldap.py b/keystone-moon/keystone/resource/backends/ldap.py
new file mode 100644 (file)
index 0000000..434c2b0
--- /dev/null
@@ -0,0 +1,196 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone import clean
+from keystone.common import driver_hints
+from keystone.common import ldap as common_ldap
+from keystone.common import models
+from keystone import exception
+from keystone.i18n import _
+from keystone.identity.backends import ldap as ldap_identity
+from keystone import resource
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class Resource(resource.Driver):
+    def __init__(self):
+        super(Resource, self).__init__()
+        self.LDAP_URL = CONF.ldap.url
+        self.LDAP_USER = CONF.ldap.user
+        self.LDAP_PASSWORD = CONF.ldap.password
+        self.suffix = CONF.ldap.suffix
+
+        # This is the only deep dependency from resource back to identity.
+        # This is safe to do since if you are using LDAP for resource, it is
+        # required that you are using it for identity as well.
+        self.user = ldap_identity.UserApi(CONF)
+
+        self.project = ProjectApi(CONF)
+
+    def default_assignment_driver(self):
+        return 'keystone.assignment.backends.ldap.Assignment'
+
+    def _set_default_parent_project(self, ref):
+        """If the parent project ID has not been set, set it to None."""
+        if isinstance(ref, dict):
+            if 'parent_id' not in ref:
+                ref = dict(ref, parent_id=None)
+            return ref
+        elif isinstance(ref, list):
+            return [self._set_default_parent_project(x) for x in ref]
+        else:
+            raise ValueError(_('Expected dict or list: %s') % type(ref))
+
+    def _validate_parent_project_is_none(self, ref):
+        """If a parent_id different from None was given,
+           raises InvalidProjectException.
+
+        """
+        parent_id = ref.get('parent_id')
+        if parent_id is not None:
+            raise exception.InvalidParentProject(parent_id)
+
+    def _set_default_attributes(self, project_ref):
+        project_ref = self._set_default_domain(project_ref)
+        return self._set_default_parent_project(project_ref)
+
+    def get_project(self, tenant_id):
+        return self._set_default_attributes(
+            self.project.get(tenant_id))
+
+    def list_projects(self, hints):
+        return self._set_default_attributes(
+            self.project.get_all_filtered(hints))
+
+    def list_projects_in_domain(self, domain_id):
+        # We don't support multiple domains within this driver, so ignore
+        # any domain specified
+        return self.list_projects(driver_hints.Hints())
+
+    def list_projects_in_subtree(self, project_id):
+        # We don't support projects hierarchy within this driver, so a
+        # project will never have children
+        return []
+
+    def list_project_parents(self, project_id):
+        # We don't support projects hierarchy within this driver, so a
+        # project will never have parents
+        return []
+
+    def is_leaf_project(self, project_id):
+        # We don't support projects hierarchy within this driver, so a
+        # project will always be a root and a leaf at the same time
+        return True
+
+    def list_projects_from_ids(self, ids):
+        return [self.get_project(id) for id in ids]
+
+    def list_project_ids_from_domain_ids(self, domain_ids):
+        # We don't support multiple domains within this driver, so ignore
+        # any domain specified
+        return [x.id for x in self.list_projects(driver_hints.Hints())]
+
+    def get_project_by_name(self, tenant_name, domain_id):
+        self._validate_default_domain_id(domain_id)
+        return self._set_default_attributes(
+            self.project.get_by_name(tenant_name))
+
+    def create_project(self, tenant_id, tenant):
+        self.project.check_allow_create()
+        tenant = self._validate_default_domain(tenant)
+        self._validate_parent_project_is_none(tenant)
+        tenant['name'] = clean.project_name(tenant['name'])
+        data = tenant.copy()
+        if 'id' not in data or data['id'] is None:
+            data['id'] = uuid.uuid4().hex
+        if 'description' in data and data['description'] in ['', None]:
+            data.pop('description')
+        return self._set_default_attributes(
+            self.project.create(data))
+
+    def update_project(self, tenant_id, tenant):
+        self.project.check_allow_update()
+        tenant = self._validate_default_domain(tenant)
+        if 'name' in tenant:
+            tenant['name'] = clean.project_name(tenant['name'])
+        return self._set_default_attributes(
+            self.project.update(tenant_id, tenant))
+
+    def delete_project(self, tenant_id):
+        self.project.check_allow_delete()
+        if self.project.subtree_delete_enabled:
+            self.project.deleteTree(tenant_id)
+        else:
+            # The manager layer will call assignments to delete the
+            # role assignments, so we just have to delete the project itself.
+            self.project.delete(tenant_id)
+
+    def create_domain(self, domain_id, domain):
+        if domain_id == CONF.identity.default_domain_id:
+            msg = _('Duplicate ID, %s.') % domain_id
+            raise exception.Conflict(type='domain', details=msg)
+        raise exception.Forbidden(_('Domains are read-only against LDAP'))
+
+    def get_domain(self, domain_id):
+        self._validate_default_domain_id(domain_id)
+        return resource.calc_default_domain()
+
+    def update_domain(self, domain_id, domain):
+        self._validate_default_domain_id(domain_id)
+        raise exception.Forbidden(_('Domains are read-only against LDAP'))
+
+    def delete_domain(self, domain_id):
+        self._validate_default_domain_id(domain_id)
+        raise exception.Forbidden(_('Domains are read-only against LDAP'))
+
+    def list_domains(self, hints):
+        return [resource.calc_default_domain()]
+
+    def list_domains_from_ids(self, ids):
+        return [resource.calc_default_domain()]
+
+    def get_domain_by_name(self, domain_name):
+        default_domain = resource.calc_default_domain()
+        if domain_name != default_domain['name']:
+            raise exception.DomainNotFound(domain_id=domain_name)
+        return default_domain
+
+
+# TODO(termie): turn this into a data object and move logic to driver
+class ProjectApi(common_ldap.ProjectLdapStructureMixin,
+                 common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap):
+
+    model = models.Project
+
+    def create(self, values):
+        data = values.copy()
+        if data.get('id') is None:
+            data['id'] = uuid.uuid4().hex
+        return super(ProjectApi, self).create(data)
+
+    def update(self, project_id, values):
+        old_obj = self.get(project_id)
+        return super(ProjectApi, self).update(project_id, values, old_obj)
+
+    def get_all_filtered(self, hints):
+        query = self.filter_query(hints)
+        return super(ProjectApi, self).get_all(query)
diff --git a/keystone-moon/keystone/resource/backends/sql.py b/keystone-moon/keystone/resource/backends/sql.py
new file mode 100644 (file)
index 0000000..fb11724
--- /dev/null
@@ -0,0 +1,260 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone import clean
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _LE
+from keystone import resource as keystone_resource
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class Resource(keystone_resource.Driver):
+
+    def default_assignment_driver(self):
+        return 'keystone.assignment.backends.sql.Assignment'
+
+    def _get_project(self, session, project_id):
+        project_ref = session.query(Project).get(project_id)
+        if project_ref is None:
+            raise exception.ProjectNotFound(project_id=project_id)
+        return project_ref
+
+    def get_project(self, tenant_id):
+        with sql.transaction() as session:
+            return self._get_project(session, tenant_id).to_dict()
+
+    def get_project_by_name(self, tenant_name, domain_id):
+        with sql.transaction() as session:
+            query = session.query(Project)
+            query = query.filter_by(name=tenant_name)
+            query = query.filter_by(domain_id=domain_id)
+            try:
+                project_ref = query.one()
+            except sql.NotFound:
+                raise exception.ProjectNotFound(project_id=tenant_name)
+            return project_ref.to_dict()
+
+    @sql.truncated
+    def list_projects(self, hints):
+        with sql.transaction() as session:
+            query = session.query(Project)
+            project_refs = sql.filter_limit_query(Project, query, hints)
+            return [project_ref.to_dict() for project_ref in project_refs]
+
+    def list_projects_from_ids(self, ids):
+        if not ids:
+            return []
+        else:
+            with sql.transaction() as session:
+                query = session.query(Project)
+                query = query.filter(Project.id.in_(ids))
+                return [project_ref.to_dict() for project_ref in query.all()]
+
+    def list_project_ids_from_domain_ids(self, domain_ids):
+        if not domain_ids:
+            return []
+        else:
+            with sql.transaction() as session:
+                query = session.query(Project.id)
+                query = (
+                    query.filter(Project.domain_id.in_(domain_ids)))
+                return [x.id for x in query.all()]
+
+    def list_projects_in_domain(self, domain_id):
+        with sql.transaction() as session:
+            self._get_domain(session, domain_id)
+            query = session.query(Project)
+            project_refs = query.filter_by(domain_id=domain_id)
+            return [project_ref.to_dict() for project_ref in project_refs]
+
+    def _get_children(self, session, project_ids):
+        query = session.query(Project)
+        query = query.filter(Project.parent_id.in_(project_ids))
+        project_refs = query.all()
+        return [project_ref.to_dict() for project_ref in project_refs]
+
+    def list_projects_in_subtree(self, project_id):
+        with sql.transaction() as session:
+            project = self._get_project(session, project_id).to_dict()
+            children = self._get_children(session, [project['id']])
+            subtree = []
+            # Note the list wrapper: set(project['id']) would build a set
+            # of the id's characters, not a one-element set.
+            examined = set([project['id']])
+            while children:
+                children_ids = set()
+                for ref in children:
+                    if ref['id'] in examined:
+                        msg = _LE('Circular reference or a repeated '
+                                  'entry found in projects hierarchy - '
+                                  '%(project_id)s.')
+                        LOG.error(msg, {'project_id': ref['id']})
+                        return
+                    children_ids.add(ref['id'])
+
+                # union() returns a new set without mutating 'examined';
+                # update() adds the ids in place as intended.
+                examined.update(children_ids)
+                subtree += children
+                children = self._get_children(session, children_ids)
+            return subtree
+
+    def list_project_parents(self, project_id):
+        with sql.transaction() as session:
+            project = self._get_project(session, project_id).to_dict()
+            parents = []
+            examined = set()
+            while project.get('parent_id') is not None:
+                if project['id'] in examined:
+                    msg = _LE('Circular reference or a repeated '
+                              'entry found in projects hierarchy - '
+                              '%(project_id)s.')
+                    LOG.error(msg, {'project_id': project['id']})
+                    return
+
+                examined.add(project['id'])
+                parent_project = self._get_project(
+                    session, project['parent_id']).to_dict()
+                parents.append(parent_project)
+                project = parent_project
+            return parents
+
+    def is_leaf_project(self, project_id):
+        with sql.transaction() as session:
+            project_refs = self._get_children(session, [project_id])
+            return not project_refs
+
+    # CRUD
+    @sql.handle_conflicts(conflict_type='project')
+    def create_project(self, tenant_id, tenant):
+        tenant['name'] = clean.project_name(tenant['name'])
+        with sql.transaction() as session:
+            tenant_ref = Project.from_dict(tenant)
+            session.add(tenant_ref)
+            return tenant_ref.to_dict()
+
+    @sql.handle_conflicts(conflict_type='project')
+    def update_project(self, tenant_id, tenant):
+        if 'name' in tenant:
+            tenant['name'] = clean.project_name(tenant['name'])
+
+        with sql.transaction() as session:
+            tenant_ref = self._get_project(session, tenant_id)
+            old_project_dict = tenant_ref.to_dict()
+            for k in tenant:
+                old_project_dict[k] = tenant[k]
+            new_project = Project.from_dict(old_project_dict)
+            for attr in Project.attributes:
+                if attr != 'id':
+                    setattr(tenant_ref, attr, getattr(new_project, attr))
+            tenant_ref.extra = new_project.extra
+            return tenant_ref.to_dict(include_extra_dict=True)
+
+    @sql.handle_conflicts(conflict_type='project')
+    def delete_project(self, tenant_id):
+        with sql.transaction() as session:
+            tenant_ref = self._get_project(session, tenant_id)
+            session.delete(tenant_ref)
+
+    # domain crud
+
+    @sql.handle_conflicts(conflict_type='domain')
+    def create_domain(self, domain_id, domain):
+        with sql.transaction() as session:
+            ref = Domain.from_dict(domain)
+            session.add(ref)
+        return ref.to_dict()
+
+    @sql.truncated
+    def list_domains(self, hints):
+        with sql.transaction() as session:
+            query = session.query(Domain)
+            refs = sql.filter_limit_query(Domain, query, hints)
+            return [ref.to_dict() for ref in refs]
+
+    def list_domains_from_ids(self, ids):
+        if not ids:
+            return []
+        else:
+            with sql.transaction() as session:
+                query = session.query(Domain)
+                query = query.filter(Domain.id.in_(ids))
+                domain_refs = query.all()
+                return [domain_ref.to_dict() for domain_ref in domain_refs]
+
+    def _get_domain(self, session, domain_id):
+        ref = session.query(Domain).get(domain_id)
+        if ref is None:
+            raise exception.DomainNotFound(domain_id=domain_id)
+        return ref
+
+    def get_domain(self, domain_id):
+        with sql.transaction() as session:
+            return self._get_domain(session, domain_id).to_dict()
+
+    def get_domain_by_name(self, domain_name):
+        with sql.transaction() as session:
+            try:
+                ref = (session.query(Domain).
+                       filter_by(name=domain_name).one())
+            except sql.NotFound:
+                raise exception.DomainNotFound(domain_id=domain_name)
+            return ref.to_dict()
+
+    @sql.handle_conflicts(conflict_type='domain')
+    def update_domain(self, domain_id, domain):
+        with sql.transaction() as session:
+            ref = self._get_domain(session, domain_id)
+            old_dict = ref.to_dict()
+            for k in domain:
+                old_dict[k] = domain[k]
+            new_domain = Domain.from_dict(old_dict)
+            for attr in Domain.attributes:
+                if attr != 'id':
+                    setattr(ref, attr, getattr(new_domain, attr))
+            ref.extra = new_domain.extra
+            return ref.to_dict()
+
+    def delete_domain(self, domain_id):
+        with sql.transaction() as session:
+            ref = self._get_domain(session, domain_id)
+            session.delete(ref)
+
+
+class Domain(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'domain'
+    attributes = ['id', 'name', 'enabled']
+    id = sql.Column(sql.String(64), primary_key=True)
+    name = sql.Column(sql.String(64), nullable=False)
+    enabled = sql.Column(sql.Boolean, default=True, nullable=False)
+    extra = sql.Column(sql.JsonBlob())
+    __table_args__ = (sql.UniqueConstraint('name'), {})
+
+
+class Project(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'project'
+    attributes = ['id', 'name', 'domain_id', 'description', 'enabled',
+                  'parent_id']
+    id = sql.Column(sql.String(64), primary_key=True)
+    name = sql.Column(sql.String(64), nullable=False)
+    domain_id = sql.Column(sql.String(64), sql.ForeignKey('domain.id'),
+                           nullable=False)
+    description = sql.Column(sql.Text())
+    enabled = sql.Column(sql.Boolean)
+    extra = sql.Column(sql.JsonBlob())
+    parent_id = sql.Column(sql.String(64), sql.ForeignKey('project.id'))
+    # Unique constraint across two columns so that a project name only has
+    # to be unique within its domain, rather than globally.
+    __table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {})
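+
+    # Sketch of the resulting behaviour (ids and names are placeholders):
+    #
+    #     session.add(Project(id='p1', name='dev', domain_id='d1'))
+    #     session.add(Project(id='p2', name='dev', domain_id='d2'))  # fine
+    #     session.add(Project(id='p3', name='dev', domain_id='d1'))
+    #     # flushing the last one violates (domain_id, name) uniqueness,
+    #     # which handle_conflicts surfaces as exception.Conflict.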
diff --git a/keystone-moon/keystone/resource/config_backends/__init__.py b/keystone-moon/keystone/resource/config_backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/resource/config_backends/sql.py b/keystone-moon/keystone/resource/config_backends/sql.py
new file mode 100644 (file)
index 0000000..e54bf22
--- /dev/null
@@ -0,0 +1,119 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _
+from keystone import resource
+
+
+class WhiteListedConfig(sql.ModelBase, sql.ModelDictMixin):
+    __tablename__ = 'whitelisted_config'
+    domain_id = sql.Column(sql.String(64), primary_key=True)
+    group = sql.Column(sql.String(255), primary_key=True)
+    option = sql.Column(sql.String(255), primary_key=True)
+    value = sql.Column(sql.JsonBlob(), nullable=False)
+
+    def to_dict(self):
+        d = super(WhiteListedConfig, self).to_dict()
+        d.pop('domain_id')
+        return d
+
+
+class SensitiveConfig(sql.ModelBase, sql.ModelDictMixin):
+    __tablename__ = 'sensitive_config'
+    domain_id = sql.Column(sql.String(64), primary_key=True)
+    group = sql.Column(sql.String(255), primary_key=True)
+    option = sql.Column(sql.String(255), primary_key=True)
+    value = sql.Column(sql.JsonBlob(), nullable=False)
+
+    def to_dict(self):
+        d = super(SensitiveConfig, self).to_dict()
+        d.pop('domain_id')
+        return d
+
+
+class DomainConfig(resource.DomainConfigDriver):
+
+    def choose_table(self, sensitive):
+        if sensitive:
+            return SensitiveConfig
+        else:
+            return WhiteListedConfig
+
+    @sql.handle_conflicts(conflict_type='domain_config')
+    def create_config_option(self, domain_id, group, option, value,
+                             sensitive=False):
+        with sql.transaction() as session:
+            config_table = self.choose_table(sensitive)
+            ref = config_table(domain_id=domain_id, group=group,
+                               option=option, value=value)
+            session.add(ref)
+        return ref.to_dict()
+
+    def _get_config_option(self, session, domain_id, group, option, sensitive):
+        try:
+            config_table = self.choose_table(sensitive)
+            ref = (session.query(config_table).
+                   filter_by(domain_id=domain_id, group=group,
+                             option=option).one())
+        except sql.NotFound:
+            msg = _('option %(option)s in group %(group)s') % {
+                'group': group, 'option': option}
+            raise exception.DomainConfigNotFound(
+                domain_id=domain_id, group_or_option=msg)
+        return ref
+
+    def get_config_option(self, domain_id, group, option, sensitive=False):
+        with sql.transaction() as session:
+            ref = self._get_config_option(session, domain_id, group, option,
+                                          sensitive)
+        return ref.to_dict()
+
+    def list_config_options(self, domain_id, group=None, option=None,
+                            sensitive=False):
+        with sql.transaction() as session:
+            config_table = self.choose_table(sensitive)
+            query = session.query(config_table)
+            query = query.filter_by(domain_id=domain_id)
+            if group:
+                query = query.filter_by(group=group)
+                if option:
+                    query = query.filter_by(option=option)
+            return [ref.to_dict() for ref in query.all()]
+
+    def update_config_option(self, domain_id, group, option, value,
+                             sensitive=False):
+        with sql.transaction() as session:
+            ref = self._get_config_option(session, domain_id, group, option,
+                                          sensitive)
+            ref.value = value
+        return ref.to_dict()
+
+    def delete_config_options(self, domain_id, group=None, option=None,
+                              sensitive=False):
+        """Deletes config options that match the filter parameters.
+
+        Since the public API is broken down into separate delete calls for
+        the whitelisted and the sensitive options, we are silent at the
+        driver level if there was nothing to delete.
+
+        """
+        with sql.transaction() as session:
+            config_table = self.choose_table(sensitive)
+            query = session.query(config_table)
+            query = query.filter_by(domain_id=domain_id)
+            if group:
+                query = query.filter_by(group=group)
+                if option:
+                    query = query.filter_by(option=option)
+            query.delete(False)
diff --git a/keystone-moon/keystone/resource/controllers.py b/keystone-moon/keystone/resource/controllers.py
new file mode 100644 (file)
index 0000000..886b5eb
--- /dev/null
@@ -0,0 +1,281 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Workflow Logic the Resource service."""
+
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import validation
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _
+from keystone import notifications
+from keystone.resource import schema
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('resource_api')
+class Tenant(controller.V2Controller):
+
+    @controller.v2_deprecated
+    def get_all_projects(self, context, **kw):
+        """Gets a list of all tenants for an admin user."""
+        if 'name' in context['query_string']:
+            return self.get_project_by_name(
+                context, context['query_string'].get('name'))
+
+        self.assert_admin(context)
+        tenant_refs = self.resource_api.list_projects_in_domain(
+            CONF.identity.default_domain_id)
+        for tenant_ref in tenant_refs:
+            tenant_ref = self.filter_domain_id(tenant_ref)
+        params = {
+            'limit': context['query_string'].get('limit'),
+            'marker': context['query_string'].get('marker'),
+        }
+        return self.format_project_list(tenant_refs, **params)
+
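+    # Request sketch (paths illustrative of the deprecated v2 admin API):
+    #
+    #     GET /v2.0/tenants?name=demo     -> get_project_by_name() above
+    #     GET /v2.0/tenants?limit=10      -> paged list for the default
+    #                                        domain, via 'limit'/'marker'
+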
+    @controller.v2_deprecated
+    def get_project(self, context, tenant_id):
+        # TODO(termie): this stuff should probably be moved to middleware
+        self.assert_admin(context)
+        ref = self.resource_api.get_project(tenant_id)
+        return {'tenant': self.filter_domain_id(ref)}
+
+    @controller.v2_deprecated
+    def get_project_by_name(self, context, tenant_name):
+        self.assert_admin(context)
+        ref = self.resource_api.get_project_by_name(
+            tenant_name, CONF.identity.default_domain_id)
+        return {'tenant': self.filter_domain_id(ref)}
+
+    # CRUD Extension
+    @controller.v2_deprecated
+    def create_project(self, context, tenant):
+        tenant_ref = self._normalize_dict(tenant)
+
+        if 'name' not in tenant_ref or not tenant_ref['name']:
+            msg = _('Name field is required and cannot be empty')
+            raise exception.ValidationError(message=msg)
+
+        self.assert_admin(context)
+        tenant_ref['id'] = tenant_ref.get('id', uuid.uuid4().hex)
+        tenant = self.resource_api.create_project(
+            tenant_ref['id'],
+            self._normalize_domain_id(context, tenant_ref))
+        return {'tenant': self.filter_domain_id(tenant)}
+
+    @controller.v2_deprecated
+    def update_project(self, context, tenant_id, tenant):
+        self.assert_admin(context)
+        # Remove domain_id if specified - a v2 api caller should not
+        # be specifying that
+        clean_tenant = tenant.copy()
+        clean_tenant.pop('domain_id', None)
+
+        tenant_ref = self.resource_api.update_project(
+            tenant_id, clean_tenant)
+        return {'tenant': tenant_ref}
+
+    @controller.v2_deprecated
+    def delete_project(self, context, tenant_id):
+        self.assert_admin(context)
+        self.resource_api.delete_project(tenant_id)
+
+
+@dependency.requires('resource_api')
+class DomainV3(controller.V3Controller):
+    collection_name = 'domains'
+    member_name = 'domain'
+
+    def __init__(self):
+        super(DomainV3, self).__init__()
+        self.get_member_from_driver = self.resource_api.get_domain
+
+    @controller.protected()
+    @validation.validated(schema.domain_create, 'domain')
+    def create_domain(self, context, domain):
+        ref = self._assign_unique_id(self._normalize_dict(domain))
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.resource_api.create_domain(ref['id'], ref, initiator)
+        return DomainV3.wrap_member(context, ref)
+
+    @controller.filterprotected('enabled', 'name')
+    def list_domains(self, context, filters):
+        hints = DomainV3.build_driver_hints(context, filters)
+        refs = self.resource_api.list_domains(hints=hints)
+        return DomainV3.wrap_collection(context, refs, hints=hints)
+
+    @controller.protected()
+    def get_domain(self, context, domain_id):
+        ref = self.resource_api.get_domain(domain_id)
+        return DomainV3.wrap_member(context, ref)
+
+    @controller.protected()
+    @validation.validated(schema.domain_update, 'domain')
+    def update_domain(self, context, domain_id, domain):
+        self._require_matching_id(domain_id, domain)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.resource_api.update_domain(domain_id, domain, initiator)
+        return DomainV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_domain(self, context, domain_id):
+        initiator = notifications._get_request_audit_info(context)
+        return self.resource_api.delete_domain(domain_id, initiator)
+
+
+@dependency.requires('domain_config_api')
+class DomainConfigV3(controller.V3Controller):
+    member_name = 'config'
+
+    @controller.protected()
+    def create_domain_config(self, context, domain_id, config):
+        original_config = (
+            self.domain_config_api.get_config_with_sensitive_info(domain_id))
+        ref = self.domain_config_api.create_config(domain_id, config)
+        if original_config:
+            # Return status code 200, since config already existed
+            return wsgi.render_response(body={self.member_name: ref})
+        else:
+            return wsgi.render_response(body={self.member_name: ref},
+                                        status=('201', 'Created'))
+
+    @controller.protected()
+    def get_domain_config(self, context, domain_id, group=None, option=None):
+        ref = self.domain_config_api.get_config(domain_id, group, option)
+        return {self.member_name: ref}
+
+    @controller.protected()
+    def update_domain_config(
+            self, context, domain_id, config, group, option):
+        ref = self.domain_config_api.update_config(
+            domain_id, config, group, option)
+        return wsgi.render_response(body={self.member_name: ref})
+
+    def update_domain_config_group(self, context, domain_id, group, config):
+        return self.update_domain_config(
+            context, domain_id, config, group, option=None)
+
+    def update_domain_config_only(self, context, domain_id, config):
+        return self.update_domain_config(
+            context, domain_id, config, group=None, option=None)
+
+    @controller.protected()
+    def delete_domain_config(
+            self, context, domain_id, group=None, option=None):
+        self.domain_config_api.delete_config(domain_id, group, option)
+
+
+@dependency.requires('resource_api')
+class ProjectV3(controller.V3Controller):
+    collection_name = 'projects'
+    member_name = 'project'
+
+    def __init__(self):
+        super(ProjectV3, self).__init__()
+        self.get_member_from_driver = self.resource_api.get_project
+
+    @controller.protected()
+    @validation.validated(schema.project_create, 'project')
+    def create_project(self, context, project):
+        ref = self._assign_unique_id(self._normalize_dict(project))
+        ref = self._normalize_domain_id(context, ref)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.resource_api.create_project(ref['id'], ref,
+                                               initiator=initiator)
+        return ProjectV3.wrap_member(context, ref)
+
+    @controller.filterprotected('domain_id', 'enabled', 'name',
+                                'parent_id')
+    def list_projects(self, context, filters):
+        hints = ProjectV3.build_driver_hints(context, filters)
+        refs = self.resource_api.list_projects(hints=hints)
+        return ProjectV3.wrap_collection(context, refs, hints=hints)
+
+    def _expand_project_ref(self, context, ref):
+        params = context['query_string']
+
+        parents_as_list = 'parents_as_list' in params and (
+            self.query_filter_is_true(params['parents_as_list']))
+        parents_as_ids = 'parents_as_ids' in params and (
+            self.query_filter_is_true(params['parents_as_ids']))
+
+        subtree_as_list = 'subtree_as_list' in params and (
+            self.query_filter_is_true(params['subtree_as_list']))
+        subtree_as_ids = 'subtree_as_ids' in params and (
+            self.query_filter_is_true(params['subtree_as_ids']))
+
+        # parents_as_list and parents_as_ids are mutually exclusive
+        if parents_as_list and parents_as_ids:
+            msg = _('Cannot use parents_as_list and parents_as_ids query '
+                    'params at the same time.')
+            raise exception.ValidationError(msg)
+
+        # subtree_as_list and subtree_as_ids are mutually exclusive
+        if subtree_as_list and subtree_as_ids:
+            msg = _('Cannot use subtree_as_list and subtree_as_ids query '
+                    'params at the same time.')
+            raise exception.ValidationError(msg)
+
+        user_id = self.get_auth_context(context).get('user_id')
+
+        if parents_as_list:
+            parents = self.resource_api.list_project_parents(
+                ref['id'], user_id)
+            ref['parents'] = [ProjectV3.wrap_member(context, p)
+                              for p in parents]
+        elif parents_as_ids:
+            ref['parents'] = self.resource_api.get_project_parents_as_ids(ref)
+
+        if subtree_as_list:
+            subtree = self.resource_api.list_projects_in_subtree(
+                ref['id'], user_id)
+            ref['subtree'] = [ProjectV3.wrap_member(context, p)
+                              for p in subtree]
+        elif subtree_as_ids:
+            ref['subtree'] = self.resource_api.get_projects_in_subtree_as_ids(
+                ref['id'])
+
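+    # Request sketch (paths illustrative): the expansion above serves
+    #
+    #     GET /v3/projects/{id}?parents_as_ids
+    #     GET /v3/projects/{id}?subtree_as_list
+    #
+    # and asking for both the *_as_list and *_as_ids variant of the same
+    # expansion in one request fails validation, as enforced above.
+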
+    @controller.protected()
+    def get_project(self, context, project_id):
+        ref = self.resource_api.get_project(project_id)
+        self._expand_project_ref(context, ref)
+        return ProjectV3.wrap_member(context, ref)
+
+    @controller.protected()
+    @validation.validated(schema.project_update, 'project')
+    def update_project(self, context, project_id, project):
+        self._require_matching_id(project_id, project)
+        self._require_matching_domain_id(
+            project_id, project, self.resource_api.get_project)
+        initiator = notifications._get_request_audit_info(context)
+        ref = self.resource_api.update_project(project_id, project,
+                                               initiator=initiator)
+        return ProjectV3.wrap_member(context, ref)
+
+    @controller.protected()
+    def delete_project(self, context, project_id):
+        initiator = notifications._get_request_audit_info(context)
+        return self.resource_api.delete_project(project_id,
+                                                initiator=initiator)
diff --git a/keystone-moon/keystone/resource/core.py b/keystone-moon/keystone/resource/core.py
new file mode 100644 (file)
index 0000000..017eb4e
--- /dev/null
@@ -0,0 +1,1324 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the resource service."""
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone import clean
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import driver_hints
+from keystone.common import manager
+from keystone.contrib import federation
+from keystone import exception
+from keystone.i18n import _, _LE, _LW
+from keystone import notifications
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+MEMOIZE = cache.get_memoization_decorator(section='resource')
+
+
+def calc_default_domain():
+    return {'description':
+            (u'Owns users and tenants (i.e. projects)'
+                ' available on Identity API v2.'),
+            'enabled': True,
+            'id': CONF.identity.default_domain_id,
+            'name': u'Default'}
+
+
+@dependency.provider('resource_api')
+@dependency.requires('assignment_api', 'credential_api', 'domain_config_api',
+                     'identity_api', 'revoke_api')
+class Manager(manager.Manager):
+    """Default pivot point for the resource backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+    _DOMAIN = 'domain'
+    _PROJECT = 'project'
+
+    def __init__(self):
+        # If there is a specific driver specified for resource, then use it.
+        # Otherwise retrieve the driver type from the assignment driver.
+        resource_driver = CONF.resource.driver
+
+        if resource_driver is None:
+            assignment_driver = (
+                dependency.get_provider('assignment_api').driver)
+            resource_driver = assignment_driver.default_resource_driver()
+
+        super(Manager, self).__init__(resource_driver)
+
+    def _get_hierarchy_depth(self, parents_list):
+        return len(parents_list) + 1
+
+    def _assert_max_hierarchy_depth(self, project_id, parents_list=None):
+        if parents_list is None:
+            parents_list = self.list_project_parents(project_id)
+        max_depth = CONF.max_project_tree_depth
+        if self._get_hierarchy_depth(parents_list) > max_depth:
+            raise exception.ForbiddenAction(
+                action=_('max hierarchy depth reached for '
+                         '%s branch.') % project_id)
+
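+    # For example, with CONF.max_project_tree_depth = 5, a project whose
+    # parents list already has five entries sits at depth six and is
+    # rejected.
+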
+    def create_project(self, tenant_id, tenant, initiator=None):
+        tenant = tenant.copy()
+        tenant.setdefault('enabled', True)
+        tenant['enabled'] = clean.project_enabled(tenant['enabled'])
+        tenant.setdefault('description', '')
+        tenant.setdefault('parent_id', None)
+
+        if tenant.get('parent_id') is not None:
+            parent_ref = self.get_project(tenant.get('parent_id'))
+            parents_list = self.list_project_parents(parent_ref['id'])
+            parents_list.append(parent_ref)
+            for ref in parents_list:
+                if ref.get('domain_id') != tenant.get('domain_id'):
+                    raise exception.ForbiddenAction(
+                        action=_('cannot create a project within a different '
+                                 'domain than its parents.'))
+                if not ref.get('enabled', True):
+                    raise exception.ForbiddenAction(
+                        action=_('cannot create a project in a '
+                                 'branch containing a disabled '
+                                 'project: %s') % ref['id'])
+            self._assert_max_hierarchy_depth(tenant.get('parent_id'),
+                                             parents_list)
+
+        ret = self.driver.create_project(tenant_id, tenant)
+        notifications.Audit.created(self._PROJECT, tenant_id, initiator)
+        if MEMOIZE.should_cache(ret):
+            self.get_project.set(ret, self, tenant_id)
+            self.get_project_by_name.set(ret, self, ret['name'],
+                                         ret['domain_id'])
+        return ret
+
+    def assert_domain_enabled(self, domain_id, domain=None):
+        """Assert the Domain is enabled.
+
+        :raise AssertionError if domain is disabled.
+        """
+        if domain is None:
+            domain = self.get_domain(domain_id)
+        if not domain.get('enabled', True):
+            raise AssertionError(_('Domain is disabled: %s') % domain_id)
+
+    def assert_domain_not_federated(self, domain_id, domain):
+        """Assert the Domain's name and id do not match the reserved keyword.
+
+        Note that the reserved keyword is defined in the configuration file;
+        by default it is 'Federated' and the comparison is case insensitive.
+        If the config option is empty, the hardcoded default 'Federated' is
+        used.
+
+        :raise AssertionError if the domain's name or ID matches the value
+            in the config.
+
+        """
+        # NOTE(marek-denis): We cannot compute this value in __init__ because
+        # config options still hold their default values at that point.
+        federated_domain = (CONF.federation.federated_domain_name or
+                            federation.FEDERATED_DOMAIN_KEYWORD).lower()
+        if (domain.get('name') and domain['name'].lower() == federated_domain):
+            raise AssertionError(_('Domain cannot be named %s')
+                                 % federated_domain)
+        if (domain_id.lower() == federated_domain):
+            raise AssertionError(_('Domain cannot have ID %s')
+                                 % federated_domain)
+
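+    # For example, with the default configuration both of the following
+    # raise AssertionError:
+    #
+    #     self.assert_domain_not_federated('federated', {'name': 'x'})
+    #     self.assert_domain_not_federated('d1', {'name': 'Federated'})
+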
+    def assert_project_enabled(self, project_id, project=None):
+        """Assert the project is enabled and its associated domain is enabled.
+
+        :raise AssertionError if the project or domain is disabled.
+        """
+        if project is None:
+            project = self.get_project(project_id)
+        self.assert_domain_enabled(domain_id=project['domain_id'])
+        if not project.get('enabled', True):
+            raise AssertionError(_('Project is disabled: %s') % project_id)
+
+    @notifications.disabled(_PROJECT, public=False)
+    def _disable_project(self, project_id):
+        """Emit a notification to the callback system project is been disabled.
+
+        This method, and associated callback listeners, removes the need for
+        making direct calls to other managers to take action (e.g. revoking
+        project scoped tokens) when a project is disabled.
+
+        :param project_id: project identifier
+        :type project_id: string
+        """
+        pass
+
+    def _assert_all_parents_are_enabled(self, project_id):
+        parents_list = self.list_project_parents(project_id)
+        for project in parents_list:
+            if not project.get('enabled', True):
+                raise exception.ForbiddenAction(
+                    action=_('cannot enable project %s since it has '
+                             'disabled parents') % project_id)
+
+    def _assert_whole_subtree_is_disabled(self, project_id):
+        subtree_list = self.driver.list_projects_in_subtree(project_id)
+        for ref in subtree_list:
+            if ref.get('enabled', True):
+                raise exception.ForbiddenAction(
+                    action=_('cannot disable project %s since '
+                             'its subtree contains enabled '
+                             'projects') % project_id)
+
+    def update_project(self, tenant_id, tenant, initiator=None):
+        original_tenant = self.driver.get_project(tenant_id)
+        tenant = tenant.copy()
+
+        parent_id = original_tenant.get('parent_id')
+        if 'parent_id' in tenant and tenant.get('parent_id') != parent_id:
+            raise exception.ForbiddenAction(
+                action=_('Update of `parent_id` is not allowed.'))
+
+        if 'enabled' in tenant:
+            tenant['enabled'] = clean.project_enabled(tenant['enabled'])
+
+        # NOTE(rodrigods): for the current implementation we only allow to
+        # disable a project if all projects below it in the hierarchy are
+        # already disabled. This also means that we can not enable a
+        # project that has disabled parents.
+        original_tenant_enabled = original_tenant.get('enabled', True)
+        tenant_enabled = tenant.get('enabled', True)
+        if not original_tenant_enabled and tenant_enabled:
+            self._assert_all_parents_are_enabled(tenant_id)
+        if original_tenant_enabled and not tenant_enabled:
+            self._assert_whole_subtree_is_disabled(tenant_id)
+            self._disable_project(tenant_id)
+
+        ret = self.driver.update_project(tenant_id, tenant)
+        notifications.Audit.updated(self._PROJECT, tenant_id, initiator)
+        self.get_project.invalidate(self, tenant_id)
+        self.get_project_by_name.invalidate(self, original_tenant['name'],
+                                            original_tenant['domain_id'])
+        return ret
+
+    def delete_project(self, tenant_id, initiator=None):
+        if not self.driver.is_leaf_project(tenant_id):
+            raise exception.ForbiddenAction(
+                action=_('cannot delete the project %s since it is not '
+                         'a leaf in the hierarchy.') % tenant_id)
+
+        project = self.driver.get_project(tenant_id)
+        project_user_ids = (
+            self.assignment_api.list_user_ids_for_project(tenant_id))
+        for user_id in project_user_ids:
+            payload = {'user_id': user_id, 'project_id': tenant_id}
+            self._emit_invalidate_user_project_tokens_notification(payload)
+        ret = self.driver.delete_project(tenant_id)
+        self.assignment_api.delete_project_assignments(tenant_id)
+        self.get_project.invalidate(self, tenant_id)
+        self.get_project_by_name.invalidate(self, project['name'],
+                                            project['domain_id'])
+        self.credential_api.delete_credentials_for_project(tenant_id)
+        notifications.Audit.deleted(self._PROJECT, tenant_id, initiator)
+        return ret
+
+    def _filter_projects_list(self, projects_list, user_id):
+        user_projects = self.assignment_api.list_projects_for_user(user_id)
+        user_projects_ids = set([proj['id'] for proj in user_projects])
+        # Keep only the projects present in user_projects. Slice-assign so
+        # the caller's list is filtered in place; rebinding the local name
+        # would silently leave the caller's list unchanged.
+        projects_list[:] = [proj for proj in projects_list
+                            if proj['id'] in user_projects_ids]
+
+    def list_project_parents(self, project_id, user_id=None):
+        parents = self.driver.list_project_parents(project_id)
+        # If a user_id was provided, the returned list should be filtered
+        # against the projects this user has access to.
+        if user_id:
+            self._filter_projects_list(parents, user_id)
+        return parents
+
+    def _build_parents_as_ids_dict(self, project, parents_by_id):
+        # NOTE(rodrigods): we don't rely on the order of the projects returned
+        # by the list_project_parents() method. Thus, we create a project cache
+        # (parents_by_id) in order to access each parent in constant time and
+        # traverse up the hierarchy.
+        def traverse_parents_hierarchy(project):
+            parent_id = project.get('parent_id')
+            if not parent_id:
+                return None
+
+            parent = parents_by_id[parent_id]
+            return {parent_id: traverse_parents_hierarchy(parent)}
+
+        return traverse_parents_hierarchy(project)
+
+    def get_project_parents_as_ids(self, project):
+        """Gets the IDs from the parents from a given project.
+
+        The project IDs are returned as a structured dictionary traversing up
+        the hierarchy to the top level project. For example, considering the
+        following project hierarchy::
+
+                                    A
+                                    |
+                                  +-B-+
+                                  |   |
+                                  C   D
+
+        If we query for the parents of project C, the expected return is
+        the following dictionary::
+
+            'parents': {
+                B['id']: {
+                    A['id']: None
+                }
+            }
+
+        """
+        parents_list = self.list_project_parents(project['id'])
+        parents_as_ids = self._build_parents_as_ids_dict(
+            project, {proj['id']: proj for proj in parents_list})
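+        # Worked example (ids hypothetical): querying project C from the
+        # hierarchy pictured above yields parents_list == [B, A] (order is
+        # not relied upon), and the traversal produces
+        #     {B['id']: {A['id']: None}}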
+        return parents_as_ids
+
+    def list_projects_in_subtree(self, project_id, user_id=None):
+        subtree = self.driver.list_projects_in_subtree(project_id)
+        # If a user_id was provided, the returned list should be filtered
+        # against the projects this user has access to.
+        if user_id:
+            self._filter_projects_list(subtree, user_id)
+        return subtree
+
+    def _build_subtree_as_ids_dict(self, project_id, subtree_by_parent):
+        # NOTE(rodrigods): we perform a depth first search to construct the
+        # dictionaries representing each level of the subtree hierarchy. In
+        # order to improve this traversal performance, we create a cache of
+        # projects (subtree_by_parent) that gives constant-time access to the
+        # direct children of a given project.
+        def traverse_subtree_hierarchy(project_id):
+            children = subtree_by_parent.get(project_id)
+            if not children:
+                return None
+
+            children_ids = {}
+            for child in children:
+                children_ids[child['id']] = traverse_subtree_hierarchy(
+                    child['id'])
+            return children_ids
+
+        return traverse_subtree_hierarchy(project_id)
+
+    def get_projects_in_subtree_as_ids(self, project_id):
+        """Gets the IDs from the projects in the subtree from a given project.
+
+        The project IDs are returned as a structured dictionary representing
+        their hierarchy. For example, considering the following project
+        hierarchy::
+
+                                    A
+                                    |
+                                  +-B-+
+                                  |   |
+                                  C   D
+
+        If we query for the subtree of project A, the expected return is
+        the following dictionary::
+
+            'subtree': {
+                B['id']: {
+                    C['id']: None,
+                    D['id']: None
+                }
+            }
+
+        """
+        def _projects_indexed_by_parent(projects_list):
+            projects_by_parent = {}
+            for proj in projects_list:
+                parent_id = proj.get('parent_id')
+                if parent_id:
+                    if parent_id in projects_by_parent:
+                        projects_by_parent[parent_id].append(proj)
+                    else:
+                        projects_by_parent[parent_id] = [proj]
+            return projects_by_parent
+
+        subtree_list = self.list_projects_in_subtree(project_id)
+        subtree_as_ids = self._build_subtree_as_ids_dict(
+            project_id, _projects_indexed_by_parent(subtree_list))
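+        # Worked example (ids hypothetical): querying project A from the
+        # hierarchy pictured above, _projects_indexed_by_parent returns
+        #     {A['id']: [B], B['id']: [C, D]}
+        # and the depth-first walk then builds
+        #     {B['id']: {C['id']: None, D['id']: None}}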
+        return subtree_as_ids
+
+    @MEMOIZE
+    def get_domain(self, domain_id):
+        return self.driver.get_domain(domain_id)
+
+    @MEMOIZE
+    def get_domain_by_name(self, domain_name):
+        return self.driver.get_domain_by_name(domain_name)
+
+    def create_domain(self, domain_id, domain, initiator=None):
+        if (not self.identity_api.multiple_domains_supported and
+                domain_id != CONF.identity.default_domain_id):
+            raise exception.Forbidden(_('Multiple domains are not supported'))
+        self.assert_domain_not_federated(domain_id, domain)
+        domain.setdefault('enabled', True)
+        domain['enabled'] = clean.domain_enabled(domain['enabled'])
+        ret = self.driver.create_domain(domain_id, domain)
+
+        notifications.Audit.created(self._DOMAIN, domain_id, initiator)
+
+        if MEMOIZE.should_cache(ret):
+            self.get_domain.set(ret, self, domain_id)
+            self.get_domain_by_name.set(ret, self, ret['name'])
+        return ret
+
+    @manager.response_truncated
+    def list_domains(self, hints=None):
+        return self.driver.list_domains(hints or driver_hints.Hints())
+
+    @notifications.disabled(_DOMAIN, public=False)
+    def _disable_domain(self, domain_id):
+        """Emit a notification to the callback system domain is been disabled.
+
+        This method, and associated callback listeners, removes the need for
+        making direct calls to other managers to take action (e.g. revoking
+        domain scoped tokens) when a domain is disabled.
+
+        :param domain_id: domain identifier
+        :type domain_id: string
+        """
+        pass
+
+    def update_domain(self, domain_id, domain, initiator=None):
+        self.assert_domain_not_federated(domain_id, domain)
+        original_domain = self.driver.get_domain(domain_id)
+        if 'enabled' in domain:
+            domain['enabled'] = clean.domain_enabled(domain['enabled'])
+        ret = self.driver.update_domain(domain_id, domain)
+        notifications.Audit.updated(self._DOMAIN, domain_id, initiator)
+        # disable owned users & projects when the API user specifically set
+        #     enabled=False
+        if (original_domain.get('enabled', True) and
+                not domain.get('enabled', True)):
+            notifications.Audit.disabled(self._DOMAIN, domain_id, initiator,
+                                         public=False)
+
+        self.get_domain.invalidate(self, domain_id)
+        self.get_domain_by_name.invalidate(self, original_domain['name'])
+        return ret
+
+    def delete_domain(self, domain_id, initiator=None):
+        # explicitly forbid deleting the default domain (this should be a
+        # carefully orchestrated manual process involving configuration
+        # changes, etc)
+        if domain_id == CONF.identity.default_domain_id:
+            raise exception.ForbiddenAction(action=_('delete the default '
+                                                     'domain'))
+
+        domain = self.driver.get_domain(domain_id)
+
+        # To help avoid inadvertent deletes, we insist that the domain
+        # has been previously disabled.  This also prevents a user deleting
+        # their own domain since, once it is disabled, they won't be able
+        # to get a valid token to issue this delete.
+        if domain['enabled']:
+            raise exception.ForbiddenAction(
+                action=_('cannot delete a domain that is enabled, '
+                         'please disable it first.'))
+
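+        # A typical delete therefore takes two steps, sketched as:
+        #
+        #     resource_api.update_domain(domain_id, {'enabled': False})
+        #     resource_api.delete_domain(domain_id)
+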
+        self._delete_domain_contents(domain_id)
+        # Delete any database stored domain config
+        self.domain_config_api.delete_config_options(domain_id)
+        self.domain_config_api.delete_config_options(domain_id, sensitive=True)
+        # TODO(henry-nash): Although the controller will ensure deletion of
+        # all users & groups within the domain (which will cause all
+        # assignments for those users/groups to also be deleted), there
+        # could still be assignments on this domain for users/groups in
+        # other domains - so we should delete these here by making a call
+        # to the backend to delete all assignments for this domain.
+        # (see Bug #1277847)
+        self.driver.delete_domain(domain_id)
+        notifications.Audit.deleted(self._DOMAIN, domain_id, initiator)
+        self.get_domain.invalidate(self, domain_id)
+        self.get_domain_by_name.invalidate(self, domain['name'])
+
+    def _delete_domain_contents(self, domain_id):
+        """Delete the contents of a domain.
+
+        Before we delete a domain, we need to remove all the entities
+        that are owned by it, i.e. Projects. To do this we
+        call the delete function for these entities, which are
+        themselves responsible for deleting any credentials and role grants
+        associated with them as well as revoking any relevant tokens.
+
+        """
+
+        def _delete_projects(project, projects, examined):
+            if project['id'] in examined:
+                msg = _LE('Circular reference or a repeated entry found '
+                          'in projects hierarchy - %(project_id)s.')
+                LOG.error(msg, {'project_id': project['id']})
+                return
+
+            examined.add(project['id'])
+            children = [proj for proj in projects
+                        if proj.get('parent_id') == project['id']]
+            for proj in children:
+                _delete_projects(proj, projects, examined)
+
+            try:
+                self.delete_project(project['id'])
+            except exception.ProjectNotFound:
+                LOG.debug(('Project %(projectid)s not found when '
+                           'deleting domain contents for %(domainid)s, '
+                           'continuing with cleanup.'),
+                          {'projectid': project['id'],
+                           'domainid': domain_id})
+
+        proj_refs = self.list_projects_in_domain(domain_id)
+
+        # Deleting projects recursively
+        roots = [x for x in proj_refs if x.get('parent_id') is None]
+        examined = set()
+        for project in roots:
+            _delete_projects(project, proj_refs, examined)
+
+    @manager.response_truncated
+    def list_projects(self, hints=None):
+        return self.driver.list_projects(hints or driver_hints.Hints())
+
+    # NOTE(henry-nash): list_projects_in_domain is actually an internal method
+    # and not exposed via the API.  Therefore there is no need to support
+    # driver hints for it.
+    def list_projects_in_domain(self, domain_id):
+        return self.driver.list_projects_in_domain(domain_id)
+
+    @MEMOIZE
+    def get_project(self, project_id):
+        return self.driver.get_project(project_id)
+
+    @MEMOIZE
+    def get_project_by_name(self, tenant_name, domain_id):
+        return self.driver.get_project_by_name(tenant_name, domain_id)
+
+    @notifications.internal(
+        notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE)
+    def _emit_invalidate_user_project_tokens_notification(self, payload):
+        # This notification's payload is a dict of user_id and
+        # project_id so the token provider can invalidate the tokens
+        # from persistence if persistence is enabled.
+        pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+
+    def _get_list_limit(self):
+        return CONF.resource.list_limit or CONF.list_limit
+
+    @abc.abstractmethod
+    def get_project_by_name(self, tenant_name, domain_id):
+        """Get a tenant by name.
+
+        :returns: tenant_ref
+        :raises: keystone.exception.ProjectNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # domain crud
+    @abc.abstractmethod
+    def create_domain(self, domain_id, domain):
+        """Creates a new domain.
+
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_domains(self, hints):
+        """List domains in the system.
+
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+
+        :returns: a list of domain_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_domains_from_ids(self, domain_ids):
+        """List domains for the provided list of ids.
+
+        :param domain_ids: list of ids
+
+        :returns: a list of domain_refs.
+
+        This method is used internally by the assignment manager to bulk read
+        a set of domains given their ids.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_domain(self, domain_id):
+        """Get a domain by ID.
+
+        :returns: domain_ref
+        :raises: keystone.exception.DomainNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_domain_by_name(self, domain_name):
+        """Get a domain by name.
+
+        :returns: domain_ref
+        :raises: keystone.exception.DomainNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_domain(self, domain_id, domain):
+        """Updates an existing domain.
+
+        :raises: keystone.exception.DomainNotFound,
+                 keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_domain(self, domain_id):
+        """Deletes an existing domain.
+
+        :raises: keystone.exception.DomainNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    # project crud
+    @abc.abstractmethod
+    def create_project(self, project_id, project):
+        """Creates a new project.
+
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_projects(self, hints):
+        """List projects in the system.
+
+        :param hints: filter hints which the driver should
+                      implement if at all possible.
+
+        :returns: a list of project_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_projects_from_ids(self, project_ids):
+        """List projects for the provided list of ids.
+
+        :param project_ids: list of ids
+
+        :returns: a list of project_refs.
+
+        This method is used internally by the assignment manager to bulk read
+        a set of projects given their ids.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_project_ids_from_domain_ids(self, domain_ids):
+        """List project ids for the provided list of domain ids.
+
+        :param domain_ids: list of domain ids
+
+        :returns: a list of project ids owned by the specified domain ids.
+
+        This method is used internally by the assignment manager to bulk read
+        a set of project ids given a list of domain ids.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_projects_in_domain(self, domain_id):
+        """List projects in the domain.
+
+        :param domain_id: the driver MUST only return projects
+                          within this domain.
+
+        :returns: a list of project_refs or an empty list.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_project(self, project_id):
+        """Get a project by ID.
+
+        :returns: project_ref
+        :raises: keystone.exception.ProjectNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_project(self, project_id, project):
+        """Updates an existing project.
+
+        :raises: keystone.exception.ProjectNotFound,
+                 keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_project(self, project_id):
+        """Deletes an existing project.
+
+        :raises: keystone.exception.ProjectNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_project_parents(self, project_id):
+        """List all parents from a project by its ID.
+
+        :param project_id: the driver will list the parents of this
+                           project.
+
+        :returns: a list of project_refs or an empty list.
+        :raises: keystone.exception.ProjectNotFound
+
+        """
+        raise exception.NotImplemented()
+
+    @abc.abstractmethod
+    def list_projects_in_subtree(self, project_id):
+        """List all projects in the subtree below the hierarchy of the
+        given project.
+
+        :param project_id: the driver will get the subtree under
+                           this project.
+
+        :returns: a list of project_refs or an empty list
+        :raises: keystone.exception.ProjectNotFound
+
+        """
+        raise exception.NotImplemented()
+
+    @abc.abstractmethod
+    def is_leaf_project(self, project_id):
+        """Checks if a project is a leaf in the hierarchy.
+
+        :param project_id: the driver will check if this project
+                           is a leaf in the hierarchy.
+
+        :raises: keystone.exception.ProjectNotFound
+
+        """
+        raise exception.NotImplemented()
+
+    # Domain management functions for backends that only allow a single
+    # domain.  Currently, this is only LDAP, but might be used by other
+    # backends in the future.
+    def _set_default_domain(self, ref):
+        """If the domain ID has not been set, set it to the default."""
+        if isinstance(ref, dict):
+            if 'domain_id' not in ref:
+                ref = ref.copy()
+                ref['domain_id'] = CONF.identity.default_domain_id
+            return ref
+        elif isinstance(ref, list):
+            return [self._set_default_domain(x) for x in ref]
+        else:
+            raise ValueError(_('Expected dict or list: %s') % type(ref))
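+
+    # A sketch of what _set_default_domain does (hypothetical values):
+    #   _set_default_domain({'id': 'p1'})
+    #     -> {'id': 'p1', 'domain_id': CONF.identity.default_domain_id}
+    #   _set_default_domain([{'id': 'p1'}, {'id': 'p2', 'domain_id': 'd1'}])
+    #     -> applies the same defaulting to each element; refs that already
+    #        carry a domain_id are returned unchanged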
+
+    def _validate_default_domain(self, ref):
+        """Validate that either the default domain or nothing is specified.
+
+        Also removes the domain from the ref so that LDAP doesn't have to
+        persist the attribute.
+
+        """
+        ref = ref.copy()
+        domain_id = ref.pop('domain_id', CONF.identity.default_domain_id)
+        self._validate_default_domain_id(domain_id)
+        return ref
+
+    def _validate_default_domain_id(self, domain_id):
+        """Validate that the domain ID specified belongs to the default domain.
+
+        """
+        if domain_id != CONF.identity.default_domain_id:
+            raise exception.DomainNotFound(domain_id=domain_id)
+
+
+@dependency.provider('domain_config_api')
+class DomainConfigManager(manager.Manager):
+    """Default pivot point for the Domain Config backend."""
+
+    # NOTE(henry-nash): In order for a config option to be stored in the
+    # standard table, it must be explicitly whitelisted. Options marked as
+    # sensitive are stored in a separate table. Attempting to store options
+    # that are not listed as either whitelisted or sensitive will raise an
+    # exception.
+    #
+    # Only those options that affect the domain-specific driver support in
+    # the identity manager are supported.
+
+    whitelisted_options = {
+        'identity': ['driver'],
+        'ldap': [
+            'url', 'user', 'suffix', 'use_dumb_member', 'dumb_member',
+            'allow_subtree_delete', 'query_scope', 'page_size',
+            'alias_dereferencing', 'debug_level', 'chase_referrals',
+            'user_tree_dn', 'user_filter', 'user_objectclass',
+            'user_id_attribute', 'user_name_attribute', 'user_mail_attribute',
+            'user_pass_attribute', 'user_enabled_attribute',
+            'user_enabled_invert', 'user_enabled_mask', 'user_enabled_default',
+            'user_attribute_ignore', 'user_default_project_id_attribute',
+            'user_allow_create', 'user_allow_update', 'user_allow_delete',
+            'user_enabled_emulation', 'user_enabled_emulation_dn',
+            'user_additional_attribute_mapping', 'group_tree_dn',
+            'group_filter', 'group_objectclass', 'group_id_attribute',
+            'group_name_attribute', 'group_member_attribute',
+            'group_desc_attribute', 'group_attribute_ignore',
+            'group_allow_create', 'group_allow_update', 'group_allow_delete',
+            'group_additional_attribute_mapping', 'tls_cacertfile',
+            'tls_cacertdir', 'use_tls', 'tls_req_cert', 'use_pool',
+            'pool_size', 'pool_retry_max', 'pool_retry_delay',
+            'pool_connection_timeout', 'pool_connection_lifetime',
+            'use_auth_pool', 'auth_pool_size', 'auth_pool_connection_lifetime'
+        ]
+    }
+    sensitive_options = {
+        'identity': [],
+        'ldap': ['password']
+    }
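+
+    # For example, the following (hypothetical) config only uses registered
+    # options, so it would pass validation; 'password' lands in the sensitive
+    # store, the rest in the whitelisted one:
+    #   {
+    #       'ldap': {'url': 'ldap://myldap.mycompany.com',
+    #                'user_tree_dn': 'OU=myOU,DC=example,DC=com',
+    #                'password': 'secret'},
+    #       'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+    #   }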
+
+    def __init__(self):
+        super(DomainConfigManager, self).__init__(CONF.domain_config.driver)
+
+    def _assert_valid_config(self, config):
+        """Ensure the options in the config are valid.
+
+        This method is called to validate the request config in create and
+        update manager calls.
+
+        :param config: config structure being created or updated
+
+        """
+        # Something must be defined in the request
+        if not config:
+            raise exception.InvalidDomainConfig(
+                reason=_('No options specified'))
+
+        # Make sure the groups/options defined in config itself are valid
+        for group in config:
+            if (not config[group] or not
+                    isinstance(config[group], dict)):
+                msg = _('The value of group %(group)s specified in the '
+                        'config should be a dictionary of options') % {
+                            'group': group}
+                raise exception.InvalidDomainConfig(reason=msg)
+            for option in config[group]:
+                self._assert_valid_group_and_option(group, option)
+
+    def _assert_valid_group_and_option(self, group, option):
+        """Ensure the combination of group and option is valid.
+
+        :param group: optional group name, if specified it must be one
+                      we support
+        :param option: optional option name, if specified it must be one
+                       we support and a group must also be specified
+
+        """
+        if not group and not option:
+            # For all calls, it's OK for neither to be defined; it means you
+            # are operating on all config options for that domain.
+            return
+
+        if not group and option:
+            # Our API structure should prevent this from ever happening, so if
+            # it does, then this is a coding error.
+            msg = _('Option %(option)s found with no group specified while '
+                    'checking domain configuration request') % {
+                        'option': option}
+            raise exception.UnexpectedError(exception=msg)
+
+        if (group and group not in self.whitelisted_options and
+                group not in self.sensitive_options):
+            msg = _('Group %(group)s is not supported '
+                    'for domain specific configurations') % {'group': group}
+            raise exception.InvalidDomainConfig(reason=msg)
+
+        if option:
+            if (option not in self.whitelisted_options[group] and option not in
+                    self.sensitive_options[group]):
+                msg = _('Option %(option)s in group %(group)s is not '
+                        'supported for domain specific configurations') % {
+                            'group': group, 'option': option}
+                raise exception.InvalidDomainConfig(reason=msg)
+
+    def _is_sensitive(self, group, option):
+        return option in self.sensitive_options[group]
+
+    def _config_to_list(self, config):
+        """Build whitelisted and sensitive lists for use by backend drivers."""
+
+        whitelisted = []
+        sensitive = []
+        for group in config:
+            for option in config[group]:
+                the_list = (sensitive if self._is_sensitive(group, option)
+                            else whitelisted)
+                the_list.append({
+                    'group': group, 'option': option,
+                    'value': config[group][option]})
+
+        return whitelisted, sensitive
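+
+    # Illustrative round-trip (hypothetical values):
+    #   _config_to_list({'ldap': {'url': 'myurl', 'password': 'secret'}})
+    # returns:
+    #   whitelisted = [{'group': 'ldap', 'option': 'url', 'value': 'myurl'}]
+    #   sensitive   = [{'group': 'ldap', 'option': 'password',
+    #                   'value': 'secret'}]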
+
+    def _list_to_config(self, whitelisted, sensitive=None, req_option=None):
+        """Build config dict from a list of option dicts.
+
+        :param whitelisted: list of dicts containing options and their groups;
+                            this has already been filtered to only contain
+                            those options to include in the output.
+        :param sensitive: list of dicts containing sensitive options and their
+                          groups; this has already been filtered to only
+                          contain those options to include in the output.
+        :param req_option: the individual option requested
+
+        :returns: a config dict, including sensitive if specified
+
+        """
+        the_list = whitelisted + (sensitive or [])
+        if not the_list:
+            return {}
+
+        if req_option:
+            # The request was specific to an individual option, so
+            # no need to include the group in the output. We first check that
+            # there is only one option in the answer (and that it's the right
+            # one) - if not, something has gone wrong and we raise an error
+            if len(the_list) > 1 or the_list[0]['option'] != req_option:
+                LOG.error(_LE('Unexpected results in response for domain '
+                              'config - %(count)s responses, first option is '
+                              '%(option)s, expected option %(expected)s'),
+                          {'count': len(the_list),
+                           'option': the_list[0]['option'],
+                           'expected': req_option})
+                raise exception.UnexpectedError(
+                    _('An unexpected error occurred when retrieving domain '
+                      'configs'))
+            return {the_list[0]['option']: the_list[0]['value']}
+
+        config = {}
+        for option in the_list:
+            config.setdefault(option['group'], {})
+            config[option['group']][option['option']] = option['value']
+
+        return config
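+
+    # The inverse direction (hypothetical values):
+    #   _list_to_config([{'group': 'ldap', 'option': 'url',
+    #                     'value': 'myurl'}])
+    #     -> {'ldap': {'url': 'myurl'}}
+    # and with req_option='url' the group wrapper is dropped:
+    #     -> {'url': 'myurl'}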
+
+    def create_config(self, domain_id, config):
+        """Create config for a domain
+
+        :param domain_id: the domain in question
+        :param config: the dict of config groups/options to assign to the
+                       domain
+
+        Creates a new config, overwriting any previous config (no Conflict
+        error will be generated).
+
+        :returns: a dict of group dicts containing the options, with any that
+                  are sensitive removed
+        :raises keystone.exception.InvalidDomainConfig: when the config
+                contains options we do not support
+
+        """
+        self._assert_valid_config(config)
+        whitelisted, sensitive = self._config_to_list(config)
+        # Delete any existing config
+        self.delete_config_options(domain_id)
+        self.delete_config_options(domain_id, sensitive=True)
+        # ...and create the new one
+        for option in whitelisted:
+            self.create_config_option(
+                domain_id, option['group'], option['option'], option['value'])
+        for option in sensitive:
+            self.create_config_option(
+                domain_id, option['group'], option['option'], option['value'],
+                sensitive=True)
+        return self._list_to_config(whitelisted)
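+
+    # A sketch of the resulting manager call (hypothetical values):
+    #   create_config('d1', {'ldap': {'url': 'myurl', 'password': 'secret'}})
+    #     -> {'ldap': {'url': 'myurl'}}   # sensitive options are not returned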
+
+    def get_config(self, domain_id, group=None, option=None):
+        """Get config, or partial config, for a domain
+
+        :param domain_id: the domain in question
+        :param group: an optional specific group of options
+        :param option: an optional specific option within the group
+
+        :returns: a dict of group dicts containing the whitelisted options,
+                  filtered by group and option specified
+        :raises keystone.exception.DomainConfigNotFound: when no config found
+                that matches domain_id, group and option specified
+        :raises keystone.exception.InvalidDomainConfig: when the config
+                and group/option parameters specify an option we do not
+                support
+
+        An example response::
+
+            {
+                'ldap': {
+                    'url': 'myurl',
+                    'user_tree_dn': 'OU=myou'},
+                'identity': {
+                    'driver': 'keystone.identity.backends.ldap.Identity'}
+
+            }
+
+        """
+        self._assert_valid_group_and_option(group, option)
+        whitelisted = self.list_config_options(domain_id, group, option)
+        if whitelisted:
+            return self._list_to_config(whitelisted, req_option=option)
+
+        if option:
+            msg = _('option %(option)s in group %(group)s') % {
+                'group': group, 'option': option}
+        elif group:
+            msg = _('group %(group)s') % {'group': group}
+        else:
+            msg = _('any options')
+        raise exception.DomainConfigNotFound(
+            domain_id=domain_id, group_or_option=msg)
+
+    def update_config(self, domain_id, config, group=None, option=None):
+        """Update config, or partial config, for a domain
+
+        :param domain_id: the domain in question
+        :param config: the config dict containing the groups/options being
+                       updated
+        :param group: an optional specific group of options, which if specified
+                      must appear in config, with no other groups
+        :param option: an optional specific option within the group, which if
+                       specified must appear in config, with no other options
+
+        The contents of the supplied config will be merged with the existing
+        config for this domain, updating or creating new options if these did
+        not previously exist. If group or option is specified, then the update
+        will be limited to those specified items and the inclusion of other
+        options in the supplied config will raise an exception, as will the
+        situation when those options do not already exist in the current
+        config.
+
+        :returns: a dict of groups containing all whitelisted options
+        :raises keystone.exception.InvalidDomainConfig: when the config
+                and group/option parameters specify an option we do not
+                support or one that does not exist in the original config
+
+        """
+        def _assert_valid_update(domain_id, config, group=None, option=None):
+            """Ensure the combination of config, group and option is valid."""
+
+            self._assert_valid_config(config)
+            self._assert_valid_group_and_option(group, option)
+
+            # If a group has been specified, then the request is to
+            # explicitly only update the options in that group - so the config
+            # must not contain anything else. Further, that group must exist in
+            # the original config. Likewise, if an option has been specified,
+            # then the group in the config must only contain that option and it
+            # also must exist in the original config.
+            if group:
+                if len(config) != 1 or (option and len(config[group]) != 1):
+                    if option:
+                        msg = _('Trying to update option %(option)s in group '
+                                '%(group)s, so that, and only that, option '
+                                'must be specified in the config') % {
+                                    'group': group, 'option': option}
+                    else:
+                        msg = _('Trying to update group %(group)s, so that, '
+                                'and only that, group must be specified in '
+                                'the config') % {'group': group}
+                    raise exception.InvalidDomainConfig(reason=msg)
+
+                # So we now know we have the right number of entries in the
+                # config that align with a group/option being specified, but we
+                # must also make sure they match.
+                if group not in config:
+                    msg = _('Trying to update group %(group)s, but config '
+                            'provided contains group %(group_other)s '
+                            'instead') % {
+                                'group': group,
+                                'group_other': list(config.keys())[0]}
+                    raise exception.InvalidDomainConfig(reason=msg)
+                if option and option not in config[group]:
+                    msg = _('Trying to update option %(option)s in group '
+                            '%(group)s, but config provided contains option '
+                            '%(option_other)s instead') % {
+                                'group': group, 'option': option,
+                                'option_other': list(config[group].keys())[0]}
+                    raise exception.InvalidDomainConfig(reason=msg)
+
+                # Finally, we need to check if the group/option specified
+                # already exists in the original config - since if not, to keep
+                # with the semantics of an update, we need to fail with
+                # a DomainConfigNotFound
+                if not self.get_config_with_sensitive_info(domain_id,
+                                                           group, option):
+                    if option:
+                        msg = _('option %(option)s in group %(group)s') % {
+                            'group': group, 'option': option}
+                        raise exception.DomainConfigNotFound(
+                            domain_id=domain_id, group_or_option=msg)
+                    else:
+                        msg = _('group %(group)s') % {'group': group}
+                        raise exception.DomainConfigNotFound(
+                            domain_id=domain_id, group_or_option=msg)
+
+        def _update_or_create(domain_id, option, sensitive):
+            """Update the option, if it doesn't exist then create it."""
+
+            try:
+                self.create_config_option(
+                    domain_id, option['group'], option['option'],
+                    option['value'], sensitive=sensitive)
+            except exception.Conflict:
+                self.update_config_option(
+                    domain_id, option['group'], option['option'],
+                    option['value'], sensitive=sensitive)
+
+        update_config = config
+        if group and option:
+            # The config will just be a dict containing the option and
+            # its value, so make it look like a single option under the
+            # group in question
+            update_config = {group: config}
+
+        _assert_valid_update(domain_id, update_config, group, option)
+
+        whitelisted, sensitive = self._config_to_list(update_config)
+
+        for new_option in whitelisted:
+            _update_or_create(domain_id, new_option, sensitive=False)
+        for new_option in sensitive:
+            _update_or_create(domain_id, new_option, sensitive=True)
+
+        return self.get_config(domain_id)
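+
+    # For instance (hypothetical values), with group='ldap' and option='url',
+    # the caller supplies just {'url': 'newurl'}; it is re-wrapped as
+    # {'ldap': {'url': 'newurl'}}, validated against the existing config and
+    # then merged, with the full whitelisted config returned.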
+
+    def delete_config(self, domain_id, group=None, option=None):
+        """Delete config, or partial config, for the domain.
+
+        :param domain_id: the domain in question
+        :param group: an optional specific group of options
+        :param option: an optional specific option within the group
+
+        If group and option are None, then the entire config for the domain
+        is deleted. If group is not None, then just that group of options will
+        be deleted. If group and option are both specified, then just that
+        option is deleted.
+
+        :raises keystone.exception.InvalidDomainConfig: when group/option
+                parameters specify an option we do not support or one that
+                does not exist in the original config.
+
+        """
+        self._assert_valid_group_and_option(group, option)
+        if group:
+            # As this is a partial delete, then make sure the items requested
+            # are valid and exist in the current config
+            current_config = self.get_config_with_sensitive_info(domain_id)
+            # Raise an exception if the group/options specified don't exist in
+            # the current config so that the delete method provides the
+            # correct error semantics.
+            current_group = current_config.get(group)
+            if not current_group:
+                msg = _('group %(group)s') % {'group': group}
+                raise exception.DomainConfigNotFound(
+                    domain_id=domain_id, group_or_option=msg)
+            if option and not current_group.get(option):
+                msg = _('option %(option)s in group %(group)s') % {
+                    'group': group, 'option': option}
+                raise exception.DomainConfigNotFound(
+                    domain_id=domain_id, group_or_option=msg)
+
+        self.delete_config_options(domain_id, group, option)
+        self.delete_config_options(domain_id, group, option, sensitive=True)
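+
+    # e.g. (hypothetical ids): delete_config('d1') removes the whole config,
+    # sensitive options included; delete_config('d1', group='ldap',
+    # option='url') removes only that one option, after checking it exists.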
+
+    def get_config_with_sensitive_info(self, domain_id, group=None,
+                                       option=None):
+        """Get config for a domain with sensitive info included.
+
+        This method is not exposed via the public API, but is used by the
+        identity manager to initialize a domain with the fully formed config
+        options.
+
+        """
+        whitelisted = self.list_config_options(domain_id, group, option)
+        sensitive = self.list_config_options(domain_id, group, option,
+                                             sensitive=True)
+
+        # Check if there are any sensitive substitutions needed. We first try
+        # to substitute any sensitive options that have valid substitution
+        # references in the whitelisted options. We then check
+        # the resulting whitelisted option and raise a warning if there
+        # appears to be an unmatched or incorrectly constructed substitution
+        # reference. To avoid the risk of logging any sensitive options that
+        # have already been substituted, we first take a copy of the
+        # whitelisted option.
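+        # For example (hypothetical values), a whitelisted option
+        #     url = 'ldap://bind:%(password)s@myldap'
+        # combined with a sensitive option password = 'secret' is returned as
+        #     url = 'ldap://bind:secret@myldap'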
+
+        # Build a dict of the sensitive options ready to try substitution
+        sensitive_dict = {s['option']: s['value'] for s in sensitive}
+
+        for each_whitelisted in whitelisted:
+            if not isinstance(each_whitelisted['value'], six.string_types):
+                # We only support substitutions into string types; if it's an
+                # integer, list, etc., then just skip to the next one
+                continue
+
+            # Store away the original value in case we need to raise a warning
+            # after substitution.
+            original_value = each_whitelisted['value']
+            warning_msg = ''
+            try:
+                each_whitelisted['value'] = (
+                    each_whitelisted['value'] % sensitive_dict)
+            except KeyError:
+                warning_msg = _LW(
+                    'Found what looks like an unmatched config option '
+                    'substitution reference - domain: %(domain)s, group: '
+                    '%(group)s, option: %(option)s, value: %(value)s. Perhaps '
+                    'the config option to which it refers has yet to be '
+                    'added?')
+            except (ValueError, TypeError):
+                warning_msg = _LW(
+                    'Found what looks like an incorrectly constructed '
+                    'config option substitution reference - domain: '
+                    '%(domain)s, group: %(group)s, option: %(option)s, '
+                    'value: %(value)s.')
+
+            if warning_msg:
+                LOG.warning(warning_msg % {
+                    'domain': domain_id,
+                    'group': each_whitelisted['group'],
+                    'option': each_whitelisted['option'],
+                    'value': original_value})
+
+        return self._list_to_config(whitelisted, sensitive)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class DomainConfigDriver(object):
+    """Interface description for a Domain Config driver."""
+
+    @abc.abstractmethod
+    def create_config_option(self, domain_id, group, option, value,
+                             sensitive=False):
+        """Creates a config option for a domain.
+
+        :param domain_id: the domain for this option
+        :param group: the group name
+        :param option: the option name
+        :param value: the value to assign to this option
+        :param sensitive: whether the option is sensitive
+
+        :returns: dict containing group, option and value
+        :raises: keystone.exception.Conflict
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_config_option(self, domain_id, group, option, sensitive=False):
+        """Gets the config option for a domain.
+
+        :param domain_id: the domain for this option
+        :param group: the group name
+        :param option: the option name
+        :param sensitive: whether the option is sensitive
+
+        :returns: dict containing group, option and value
+        :raises keystone.exception.DomainConfigNotFound: the option doesn't
+            exist.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_config_options(self, domain_id, group=None, option=None,
+                            sensitive=False):
+        """Gets a config options for a domain.
+
+        :param domain_id: the domain for this option
+        :param group: optional group option name
+        :param option: optional option name. If group is None, then this
+                       parameter is ignored
+        :param sensitive: whether the option is sensitive
+
+        :returns: list of dicts containing group, option and value
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def update_config_option(self, domain_id, group, option, value,
+                             sensitive=False):
+        """Updates a config option for a domain.
+
+        :param domain_id: the domain for this option
+        :param group: the group option name
+        :param option: the option name
+        :param value: the value to assign to this option
+        :param sensitive: whether the option is sensitive
+
+        :returns: dict containing updated group, option and value
+        :raises keystone.exception.DomainConfigNotFound: the option doesn't
+            exist.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_config_options(self, domain_id, group=None, option=None,
+                              sensitive=False):
+        """Deletes config options for a domain.
+
+        Allows deletion of all options for a domain, all options in a group
+        or a specific option. The driver is silent if there are no options
+        to delete.
+
+        :param domain_id: the domain for this option
+        :param group: optional group option name
+        :param option: optional option name. If group is None, then this
+                       parameter is ignored
+        :param sensitive: whether the option is sensitive
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/resource/routers.py b/keystone-moon/keystone/resource/routers.py
new file mode 100644 (file)
index 0000000..8ccd10a
--- /dev/null
@@ -0,0 +1,94 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""WSGI Routers for the Resource service."""
+
+from keystone.common import json_home
+from keystone.common import router
+from keystone.common import wsgi
+from keystone.resource import controllers
+
+
+class Admin(wsgi.ComposableRouter):
+    def add_routes(self, mapper):
+        # Tenant Operations
+        tenant_controller = controllers.Tenant()
+        mapper.connect('/tenants',
+                       controller=tenant_controller,
+                       action='get_all_projects',
+                       conditions=dict(method=['GET']))
+        mapper.connect('/tenants/{tenant_id}',
+                       controller=tenant_controller,
+                       action='get_project',
+                       conditions=dict(method=['GET']))
+
+
+class Routers(wsgi.RoutersBase):
+
+    def append_v3_routers(self, mapper, routers):
+        routers.append(
+            router.Router(controllers.DomainV3(),
+                          'domains', 'domain',
+                          resource_descriptions=self.v3_resources))
+
+        config_controller = controllers.DomainConfigV3()
+
+        self._add_resource(
+            mapper, config_controller,
+            path='/domains/{domain_id}/config',
+            get_head_action='get_domain_config',
+            put_action='create_domain_config',
+            patch_action='update_domain_config_only',
+            delete_action='delete_domain_config',
+            rel=json_home.build_v3_resource_relation('domain_config'),
+            status=json_home.Status.EXPERIMENTAL,
+            path_vars={
+                'domain_id': json_home.Parameters.DOMAIN_ID
+            })
+
+        config_group_param = (
+            json_home.build_v3_parameter_relation('config_group'))
+        self._add_resource(
+            mapper, config_controller,
+            path='/domains/{domain_id}/config/{group}',
+            get_head_action='get_domain_config',
+            patch_action='update_domain_config_group',
+            delete_action='delete_domain_config',
+            rel=json_home.build_v3_resource_relation('domain_config_group'),
+            status=json_home.Status.EXPERIMENTAL,
+            path_vars={
+                'domain_id': json_home.Parameters.DOMAIN_ID,
+                'group': config_group_param
+            })
+
+        self._add_resource(
+            mapper, config_controller,
+            path='/domains/{domain_id}/config/{group}/{option}',
+            get_head_action='get_domain_config',
+            patch_action='update_domain_config',
+            delete_action='delete_domain_config',
+            rel=json_home.build_v3_resource_relation('domain_config_option'),
+            status=json_home.Status.EXPERIMENTAL,
+            path_vars={
+                'domain_id': json_home.Parameters.DOMAIN_ID,
+                'group': config_group_param,
+                'option': json_home.build_v3_parameter_relation(
+                    'config_option')
+            })
+
+        routers.append(
+            router.Router(controllers.ProjectV3(),
+                          'projects', 'project',
+                          resource_descriptions=self.v3_resources))
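+
+# A quick sketch of the (experimental) domain-config routes registered above,
+# mounted under /v3, paired with the controller actions they dispatch to:
+#   PUT    /domains/{domain_id}/config                  -> create_domain_config
+#   GET    /domains/{domain_id}/config/{group}          -> get_domain_config
+#   PATCH  /domains/{domain_id}/config/{group}/{option} -> update_domain_config
+#   DELETE /domains/{domain_id}/config                  -> delete_domain_config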
diff --git a/keystone-moon/keystone/resource/schema.py b/keystone-moon/keystone/resource/schema.py
new file mode 100644 (file)
index 0000000..0fd59e3
--- /dev/null
@@ -0,0 +1,75 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+
+
+_project_properties = {
+    'description': validation.nullable(parameter_types.description),
+    # NOTE(lbragstad): domain_id isn't nullable according to some backends.
+    # The identity-api should be updated to be consistent with the
+    # implementation.
+    'domain_id': parameter_types.id_string,
+    'enabled': parameter_types.boolean,
+    'parent_id': validation.nullable(parameter_types.id_string),
+    'name': {
+        'type': 'string',
+        'minLength': 1,
+        'maxLength': 64
+    }
+}
+
+project_create = {
+    'type': 'object',
+    'properties': _project_properties,
+    # NOTE(lbragstad): A project name is the only parameter required for
+    # project creation according to the Identity V3 API. We should think
+    # about using the maxProperties validator here, and in update.
+    'required': ['name'],
+    'additionalProperties': True
+}
+
+project_update = {
+    'type': 'object',
+    'properties': _project_properties,
+    # NOTE(lbragstad) Make sure at least one property is being updated
+    'minProperties': 1,
+    'additionalProperties': True
+}
+
+_domain_properties = {
+    'description': validation.nullable(parameter_types.description),
+    'enabled': parameter_types.boolean,
+    'name': {
+        'type': 'string',
+        'minLength': 1,
+        'maxLength': 64
+    }
+}
+
+domain_create = {
+    'type': 'object',
+    'properties': _domain_properties,
+    # TODO(lbragstad): According to the V3 API spec, name isn't required but
+    # the current implementation in assignment.controller:DomainV3 requires a
+    # name for the domain.
+    'required': ['name'],
+    'additionalProperties': True
+}
+
+domain_update = {
+    'type': 'object',
+    'properties': _domain_properties,
+    'minProperties': 1,
+    'additionalProperties': True
+}
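+
+# A hedged sketch of how these schemas are consumed (decorator signature
+# assumed from keystone.common.validation):
+#
+#     @validation.validated(project_create, 'project')
+#     def create_project(self, context, project):
+#         ...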
diff --git a/keystone-moon/keystone/routers.py b/keystone-moon/keystone/routers.py
new file mode 100644 (file)
index 0000000..a0f9ed2
--- /dev/null
@@ -0,0 +1,80 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The only routers in this file should be ``ComposableRouter`` subclasses.
+
+The routers for the backends should be in the backend-specific router modules.
+For example, the ``ComposableRouter`` for ``identity`` belongs in::
+
+    keystone.identity.routers
+
+"""
+
+
+from keystone.common import wsgi
+from keystone import controllers
+
+
+class Extension(wsgi.ComposableRouter):
+    def __init__(self, is_admin=True):
+        if is_admin:
+            self.controller = controllers.AdminExtensions()
+        else:
+            self.controller = controllers.PublicExtensions()
+
+    def add_routes(self, mapper):
+        extensions_controller = self.controller
+        mapper.connect('/extensions',
+                       controller=extensions_controller,
+                       action='get_extensions_info',
+                       conditions=dict(method=['GET']))
+        mapper.connect('/extensions/{extension_alias}',
+                       controller=extensions_controller,
+                       action='get_extension_info',
+                       conditions=dict(method=['GET']))
+
+
+class VersionV2(wsgi.ComposableRouter):
+    def __init__(self, description):
+        self.description = description
+
+    def add_routes(self, mapper):
+        version_controller = controllers.Version(self.description)
+        mapper.connect('/',
+                       controller=version_controller,
+                       action='get_version_v2')
+
+
+class VersionV3(wsgi.ComposableRouter):
+    def __init__(self, description, routers):
+        self.description = description
+        self._routers = routers
+
+    def add_routes(self, mapper):
+        version_controller = controllers.Version(self.description,
+                                                 routers=self._routers)
+        mapper.connect('/',
+                       controller=version_controller,
+                       action='get_version_v3')
+
+
+class Versions(wsgi.ComposableRouter):
+    def __init__(self, description):
+        self.description = description
+
+    def add_routes(self, mapper):
+        version_controller = controllers.Version(self.description)
+        mapper.connect('/',
+                       controller=version_controller,
+                       action='get_versions')
diff --git a/keystone-moon/keystone/server/__init__.py b/keystone-moon/keystone/server/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/server/common.py b/keystone-moon/keystone/server/common.py
new file mode 100644 (file)
index 0000000..fda44ee
--- /dev/null
@@ -0,0 +1,45 @@
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from oslo_config import cfg
+
+from keystone import backends
+from keystone.common import dependency
+from keystone.common import sql
+from keystone import config
+
+
+CONF = cfg.CONF
+
+
+def configure(version=None, config_files=None,
+              pre_setup_logging_fn=lambda: None):
+    config.configure()
+    sql.initialize()
+    config.set_default_for_default_log_levels()
+
+    CONF(project='keystone', version=version,
+         default_config_files=config_files)
+
+    pre_setup_logging_fn()
+    config.setup_logging()
+
+
+def setup_backends(load_extra_backends_fn=lambda: {},
+                   startup_application_fn=lambda: None):
+    drivers = backends.load_backends()
+    drivers.update(load_extra_backends_fn())
+    res = startup_application_fn()
+    drivers.update(dependency.resolve_future_dependencies())
+    return drivers, res
diff --git a/keystone-moon/keystone/server/eventlet.py b/keystone-moon/keystone/server/eventlet.py
new file mode 100644 (file)
index 0000000..5bedaf9
--- /dev/null
@@ -0,0 +1,156 @@
+
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+import os
+import socket
+
+from oslo_concurrency import processutils
+from oslo_config import cfg
+import oslo_i18n
+import pbr.version
+
+
+# NOTE(dstanek): i18n.enable_lazy() must be called before
+# keystone.i18n._() is called to ensure it has the desired lazy lookup
+# behavior. This includes cases, like keystone.exceptions, where
+# keystone.i18n._() is called at import time.
+oslo_i18n.enable_lazy()
+
+
+from keystone.common import environment
+from keystone.common import utils
+from keystone import config
+from keystone.i18n import _
+from keystone.openstack.common import service
+from keystone.openstack.common import systemd
+from keystone.server import common
+from keystone import service as keystone_service
+
+
+CONF = cfg.CONF
+
+
+class ServerWrapper(object):
+    """Wraps a Server with some launching info & capabilities."""
+
+    def __init__(self, server, workers):
+        self.server = server
+        self.workers = workers
+
+    def launch_with(self, launcher):
+        self.server.listen()
+        if self.workers > 1:
+            # Use multi-process launcher
+            launcher.launch_service(self.server, self.workers)
+        else:
+            # Use single process launcher
+            launcher.launch_service(self.server)
+
+
+def create_server(conf, name, host, port, workers):
+    app = keystone_service.loadapp('config:%s' % conf, name)
+    server = environment.Server(app, host=host, port=port,
+                                keepalive=CONF.eventlet_server.tcp_keepalive,
+                                keepidle=CONF.eventlet_server.tcp_keepidle)
+    if CONF.eventlet_server_ssl.enable:
+        server.set_ssl(CONF.eventlet_server_ssl.certfile,
+                       CONF.eventlet_server_ssl.keyfile,
+                       CONF.eventlet_server_ssl.ca_certs,
+                       CONF.eventlet_server_ssl.cert_required)
+    return name, ServerWrapper(server, workers)
+
+
+def serve(*servers):
+    logging.warning(_('Running keystone via eventlet is deprecated as of Kilo '
+                      'in favor of running in a WSGI server (e.g. mod_wsgi). '
+                      'Support for keystone under eventlet will be removed in '
+                      'the "M"-Release.'))
+    if max([server[1].workers for server in servers]) > 1:
+        launcher = service.ProcessLauncher()
+    else:
+        launcher = service.ServiceLauncher()
+
+    for name, server in servers:
+        try:
+            server.launch_with(launcher)
+        except socket.error:
+            logging.exception(_('Failed to start the %(name)s server') % {
+                'name': name})
+            raise
+
+    # notify calling process we are ready to serve
+    systemd.notify_once()
+
+    for name, server in servers:
+        launcher.wait()
+
+
+def _get_workers(worker_type_config_opt):
+    # Get the value from config, if the config value is None (not set), return
+    # the number of cpus with a minimum of 2.
+    worker_count = CONF.eventlet_server.get(worker_type_config_opt)
+    if not worker_count:
+        worker_count = max(2, processutils.get_worker_count())
+    return worker_count
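+
+# e.g. on a 4-core host with admin_workers left unset, _get_workers returns
+# max(2, 4) == 4; an explicit admin_workers = 1 is respected as-is.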
+
+
+def configure_threading():
+    monkeypatch_thread = not CONF.standard_threads
+    pydev_debug_url = utils.setup_remote_pydev_debug()
+    if pydev_debug_url:
+        # in order to work around errors caused by monkey patching we have to
+        # set the thread to False.  An explanation is here:
+        # http://lists.openstack.org/pipermail/openstack-dev/2012-August/
+        # 000794.html
+        monkeypatch_thread = False
+    environment.use_eventlet(monkeypatch_thread)
+
+
+def run(possible_topdir):
+    dev_conf = os.path.join(possible_topdir,
+                            'etc',
+                            'keystone.conf')
+    config_files = None
+    if os.path.exists(dev_conf):
+        config_files = [dev_conf]
+
+    common.configure(
+        version=pbr.version.VersionInfo('keystone').version_string(),
+        config_files=config_files,
+        pre_setup_logging_fn=configure_threading)
+
+    paste_config = config.find_paste_config()
+
+    def create_servers():
+        admin_worker_count = _get_workers('admin_workers')
+        public_worker_count = _get_workers('public_workers')
+
+        servers = []
+        servers.append(create_server(paste_config,
+                                     'admin',
+                                     CONF.eventlet_server.admin_bind_host,
+                                     CONF.eventlet_server.admin_port,
+                                     admin_worker_count))
+        servers.append(create_server(paste_config,
+                                     'main',
+                                     CONF.eventlet_server.public_bind_host,
+                                     CONF.eventlet_server.public_port,
+                                     public_worker_count))
+        return servers
+
+    _unused, servers = common.setup_backends(
+        startup_application_fn=create_servers)
+    serve(*servers)
diff --git a/keystone-moon/keystone/server/wsgi.py b/keystone-moon/keystone/server/wsgi.py
new file mode 100644 (file)
index 0000000..863f13b
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from oslo_config import cfg
+import oslo_i18n
+
+
+# NOTE(dstanek): i18n.enable_lazy() must be called before
+# keystone.i18n._() is called to ensure it has the desired lazy lookup
+# behavior. This includes cases, like keystone.exceptions, where
+# keystone.i18n._() is called at import time.
+oslo_i18n.enable_lazy()
+
+
+from keystone.common import environment
+from keystone import config
+from keystone.server import common
+from keystone import service as keystone_service
+
+
+CONF = cfg.CONF
+
+
+def initialize_application(name):
+    common.configure()
+
+    # Log the options used when starting if we're in debug mode...
+    if CONF.debug:
+        CONF.log_opt_values(logging.getLogger(CONF.prog), logging.DEBUG)
+
+    environment.use_stdlib()
+
+    def loadapp():
+        return keystone_service.loadapp(
+            'config:%s' % config.find_paste_config(), name)
+
+    _unused, application = common.setup_backends(
+        startup_application_fn=loadapp)
+    return application
diff --git a/keystone-moon/keystone/service.py b/keystone-moon/keystone/service.py
new file mode 100644 (file)
index 0000000..e9a0748
--- /dev/null
@@ -0,0 +1,118 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import sys
+
+from oslo_config import cfg
+from oslo_log import log
+from paste import deploy
+import routes
+
+from keystone import assignment
+from keystone import auth
+from keystone import catalog
+from keystone.common import wsgi
+from keystone import controllers
+from keystone import credential
+from keystone import identity
+from keystone import policy
+from keystone import resource
+from keystone import routers
+from keystone import token
+from keystone import trust
+from keystone.contrib import moon as authz
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+def loadapp(conf, name):
+    # NOTE(blk-u): Save the application being loaded in the controllers module.
+    # This is similar to how public_app_factory() and v3_app_factory()
+    # register the version with the controllers module.
+    controllers.latest_app = deploy.loadapp(conf, name=name)
+    return controllers.latest_app
+
+
+def fail_gracefully(f):
+    """Logs exceptions and aborts."""
+    @functools.wraps(f)
+    def wrapper(*args, **kw):
+        try:
+            return f(*args, **kw)
+        except Exception as e:
+            LOG.debug(e, exc_info=True)
+
+            # exception message is printed to all logs
+            LOG.critical(e)
+            sys.exit(1)
+
+    return wrapper
+
+
+@fail_gracefully
+def public_app_factory(global_conf, **local_conf):
+    controllers.register_version('v2.0')
+    return wsgi.ComposingRouter(routes.Mapper(),
+                                [assignment.routers.Public(),
+                                 token.routers.Router(),
+                                 routers.VersionV2('public'),
+                                 routers.Extension(False)])
+
+
+@fail_gracefully
+def admin_app_factory(global_conf, **local_conf):
+    controllers.register_version('v2.0')
+    return wsgi.ComposingRouter(routes.Mapper(),
+                                [identity.routers.Admin(),
+                                 assignment.routers.Admin(),
+                                 token.routers.Router(),
+                                 resource.routers.Admin(),
+                                 routers.VersionV2('admin'),
+                                 routers.Extension()])
+
+
+@fail_gracefully
+def public_version_app_factory(global_conf, **local_conf):
+    return wsgi.ComposingRouter(routes.Mapper(),
+                                [routers.Versions('public')])
+
+
+@fail_gracefully
+def admin_version_app_factory(global_conf, **local_conf):
+    return wsgi.ComposingRouter(routes.Mapper(),
+                                [routers.Versions('admin')])
+
+
+@fail_gracefully
+def v3_app_factory(global_conf, **local_conf):
+    controllers.register_version('v3')
+    mapper = routes.Mapper()
+    sub_routers = []
+    _routers = []
+
+    router_modules = [assignment, auth, catalog, credential, identity, policy,
+                      resource, authz]
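+    # NOTE: authz is the moon contrib extension (keystone.contrib.moon),
+    # wired into the v3 API alongside the core keystone routers.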
+    if CONF.trust.enabled:
+        router_modules.append(trust)
+
+    for module in router_modules:
+        routers_instance = module.routers.Routers()
+        _routers.append(routers_instance)
+        routers_instance.append_v3_routers(mapper, sub_routers)
+
+    # Add in the v3 version api
+    sub_routers.append(routers.VersionV3('public', _routers))
+    return wsgi.ComposingRouter(mapper, sub_routers)
diff --git a/keystone-moon/keystone/tests/__init__.py b/keystone-moon/keystone/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/moon/__init__.py b/keystone-moon/keystone/tests/moon/__init__.py
new file mode 100644 (file)
index 0000000..1b678d5
--- /dev/null
@@ -0,0 +1,4 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
diff --git a/keystone-moon/keystone/tests/moon/func/__init__.py b/keystone-moon/keystone/tests/moon/func/__init__.py
new file mode 100644 (file)
index 0000000..1b678d5
--- /dev/null
@@ -0,0 +1,4 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
diff --git a/keystone-moon/keystone/tests/moon/func/test_func_api_authz.py b/keystone-moon/keystone/tests/moon/func/test_func_api_authz.py
new file mode 100644 (file)
index 0000000..77438e9
--- /dev/null
@@ -0,0 +1,129 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import unittest
+import json
+import httplib
+
+
+CREDENTIALS = {
+    "host": "127.0.0.1",
+    "port": "35357",
+    "login": "admin",
+    "password": "nomoresecrete",
+    "tenant_name": "demo",
+    "sessionid": "kxb50d9uusiywfcs2fiidmu1j5nsyckr",
+    "csrftoken": "",
+    "x-subject-token": ""
+}
+
+
+def get_url(url, post_data=None, delete_data=None, crsftoken=None, method="GET", authtoken=None):
+    # MOON_SERVER_IP["URL"] = url
+    # _url = "http://{HOST}:{PORT}".format(**MOON_SERVER_IP)
+    if post_data:
+        method = "POST"
+    if delete_data:
+        method = "DELETE"
+    print("\033[32m{} {}\033[m".format(method, url))
+    conn = httplib.HTTPConnection(CREDENTIALS["host"], CREDENTIALS["port"])
+    headers = {
+        "Content-type": "application/x-www-form-urlencoded",
+        # "Accept": "text/plain",
+        "Accept": "text/plain,text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+        'Cookie': 'sessionid={}'.format(CREDENTIALS["sessionid"]),
+    }
+    if crsftoken:
+        headers["Cookie"] = "csrftoken={}; sessionid={}; NG_TRANSLATE_LANG_KEY:\"en\"".format(crsftoken, CREDENTIALS["sessionid"])
+        CREDENTIALS["crsftoken"] = crsftoken
+    if authtoken:
+        headers["X-Auth-Token"] = CREDENTIALS["x-subject-token"]
+    if post_data:
+        method = "POST"
+        headers["Content-type"] = "application/json"
+        if crsftoken:
+            post_data = "&".join(map(lambda x: "=".join(x), post_data))
+        elif "crsftoken" in CREDENTIALS and "sessionid" in CREDENTIALS:
+            post_data = json.dumps(post_data)
+            headers["Cookie"] = "csrftoken={}; sessionid={}; NG_TRANSLATE_LANG_KEY:\"en\"".format(
+                CREDENTIALS["crsftoken"],
+                CREDENTIALS["sessionid"])
+        else:
+            post_data = json.dumps(post_data)
+        # conn.request(method, url, json.dumps(post_data), headers=headers)
+        conn.request(method, url, post_data, headers=headers)
+    elif delete_data:
+        method = "DELETE"
+        conn.request(method, url, json.dumps(delete_data), headers=headers)
+    else:
+        conn.request(method, url, headers=headers)
+    resp = conn.getresponse()
+    headers = resp.getheaders()
+    try:
+        CREDENTIALS["x-subject-token"] = dict(headers)["x-subject-token"]
+    except KeyError:
+        pass
+    if crsftoken:
+        sessionid_start = dict(headers)["set-cookie"].index("sessionid=")+len("sessionid=")
+        sessionid_end = dict(headers)["set-cookie"].index(";", sessionid_start)
+        sessionid = dict(headers)["set-cookie"][sessionid_start:sessionid_end]
+        CREDENTIALS["sessionid"] = sessionid
+    content = resp.read()
+    conn.close()
+    try:
+        return json.loads(content)
+    except ValueError:
+        return {"content": content}
+
+
+class AuthTest(unittest.TestCase):
+
+    def setUp(self):
+        post = {
+            "auth": {
+                "identity": {
+                    "methods": [
+                        "password"
+                    ],
+                    "password": {
+                        "user": {
+                            "domain": {
+                                "id": "Default"
+                            },
+                            "name": "admin",
+                            "password": "nomoresecrete"
+                        }
+                    }
+                },
+                "scope": {
+                    "project": {
+                        "domain": {
+                            "id": "Default"
+                        },
+                        "name": "demo"
+                    }
+                }
+            }
+        }
+        data = get_url("/v3/auth/tokens", post_data=post)
+        self.assertIn("token", data)
+
+    def tearDown(self):
+        pass
+
+    def test_authz(self):
+        data = get_url("/v3/OS-MOON/authz/1234567890/1111111/2222222/3333333", authtoken=True)
+        for key in ("authz", "subject_id", "tenant_id", "object_id", "action_id"):
+            self.assertIn(key, data)
+        print(data)
+        data = get_url("/v3/OS-MOON/authz/961420e0aeed4fd88e09cf4ae2ae700e/"
+                       "4cff0936eeed42439d746e8071245235/df60c814-bafd-44a8-ad34-6c649e75295f/unpause", authtoken=True)
+        for key in ("authz", "subject_id", "tenant_id", "object_id", "action_id"):
+            self.assertIn(key, data)
+        print(data)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/keystone-moon/keystone/tests/moon/func/test_func_api_intra_extension_admin.py b/keystone-moon/keystone/tests/moon/func/test_func_api_intra_extension_admin.py
new file mode 100644 (file)
index 0000000..607691e
--- /dev/null
@@ -0,0 +1,1011 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import unittest
+import json
+import httplib
+from uuid import uuid4
+import copy
+
+CREDENTIALS = {
+    "host": "127.0.0.1",
+    "port": "35357",
+    "login": "admin",
+    "password": "nomoresecrete",
+    "tenant_name": "demo",
+    "sessionid": "kxb50d9uusiywfcs2fiidmu1j5nsyckr",
+    "csrftoken": "",
+    "x-subject-token": ""
+}
+
+
+def get_url(url, post_data=None, delete_data=None, crsftoken=None, method="GET", authtoken=None):
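+    """Send an HTTP request to Keystone and return the decoded JSON reply.
+
+    post_data and delete_data force the POST and DELETE methods; authtoken
+    adds the stored X-Auth-Token header; crsftoken sends the Django CSRF and
+    session cookies and, for POSTs, joins the data as URL-encoded pairs.
+    The x-subject-token response header and, on crsftoken calls, the
+    sessionid cookie are saved back into CREDENTIALS. Bodies that are not
+    valid JSON are returned as {"content": body}.
+    """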
+    # MOON_SERVER_IP["URL"] = url
+    # _url = "http://{HOST}:{PORT}".format(**MOON_SERVER_IP)
+    if post_data:
+        method = "POST"
+    if delete_data:
+        method = "DELETE"
+    # print("\033[32m{} {}\033[m".format(method, url))
+    conn = httplib.HTTPConnection(CREDENTIALS["host"], CREDENTIALS["port"])
+    headers = {
+        "Content-type": "application/x-www-form-urlencoded",
+        # "Accept": "text/plain",
+        "Accept": "text/plain,text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+        'Cookie': 'sessionid={}'.format(CREDENTIALS["sessionid"]),
+    }
+    if crsftoken:
+        headers["Cookie"] = "csrftoken={}; sessionid={}; NG_TRANSLATE_LANG_KEY:\"en\"".format(crsftoken, CREDENTIALS["sessionid"])
+        CREDENTIALS["crsftoken"] = crsftoken
+    if authtoken:
+        headers["X-Auth-Token"] = CREDENTIALS["x-subject-token"]
+    if post_data:
+        method = "POST"
+        headers["Content-type"] = "application/json"
+        if crsftoken:
+            post_data = "&".join(map(lambda x: "=".join(x), post_data))
+        elif "crsftoken" in CREDENTIALS and "sessionid" in CREDENTIALS:
+            post_data = json.dumps(post_data)
+            headers["Cookie"] = "csrftoken={}; sessionid={}; NG_TRANSLATE_LANG_KEY:\"en\"".format(
+                CREDENTIALS["crsftoken"],
+                CREDENTIALS["sessionid"])
+        else:
+            post_data = json.dumps(post_data)
+        # conn.request(method, url, json.dumps(post_data), headers=headers)
+        conn.request(method, url, post_data, headers=headers)
+    elif delete_data:
+        method = "DELETE"
+        conn.request(method, url, json.dumps(delete_data), headers=headers)
+    else:
+        conn.request(method, url, headers=headers)
+    resp = conn.getresponse()
+    headers = resp.getheaders()
+    try:
+        CREDENTIALS["x-subject-token"] = dict(headers)["x-subject-token"]
+    except KeyError:
+        pass
+    if crsftoken:
+        set_cookie = dict(headers)["set-cookie"]
+        sessionid_start = set_cookie.index("sessionid=") + len("sessionid=")
+        sessionid_end = set_cookie.index(";", sessionid_start)
+        CREDENTIALS["sessionid"] = set_cookie[sessionid_start:sessionid_end]
+    content = resp.read()
+    conn.close()
+    try:
+        return json.loads(content)
+    except ValueError:
+        return {"content": content}
+
+
+def get_keystone_user(name="demo", intra_extension_uuid=None):
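+    """Return the UUID of the named Keystone user, falling back to admin.
+
+    When intra_extension_uuid is given, the user is also added as a subject
+    of that intra-extension.
+    """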
+    users = get_url("/v3/users", authtoken=True)["users"]
+    demo_user_uuid = None
+    for user in users:
+        if user["name"] == name:
+            demo_user_uuid = user["id"]
+            break
+        # if user "name" is not present, fallback to admin
+        if user["name"] == "admin":
+            demo_user_uuid = user["id"]
+    if intra_extension_uuid:
+        post_data = {"subject_id": demo_user_uuid}
+        get_url("/v3/OS-MOON/intra_extensions/{}/subjects".format(
+            intra_extension_uuid), post_data=post_data, authtoken=True)
+    return demo_user_uuid
+
+
+class IntraExtensionsTest(unittest.TestCase):
+
+    def setUp(self):
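+        """Authenticate as admin on the demo project; get_url() stores the token."""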
+        post = {
+            "auth": {
+                "identity": {
+                    "methods": [
+                        "password"
+                    ],
+                    "password": {
+                        "user": {
+                            "domain": {
+                                "id": "Default"
+                            },
+                            "name": "admin",
+                            "password": "nomoresecrete"
+                        }
+                    }
+                },
+                "scope": {
+                    "project": {
+                        "domain": {
+                            "id": "Default"
+                        },
+                        "name": "demo"
+                    }
+                }
+            }
+        }
+        data = get_url("/v3/auth/tokens", post_data=post)
+        self.assertIn("token", data)
+
+    def tearDown(self):
+        pass
+
+    def test_create_intra_extensions(self):
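+        """Create and delete an intra-extension for every authz policy model."""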
+        data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+        self.assertIn("intra_extensions", data)
+        data = get_url("/v3/OS-MOON/authz_policies", authtoken=True)
+        self.assertIn("authz_policies", data)
+        for model in data["authz_policies"]:
+            # Create a new intra_extension
+            new_ie = {
+                "name": "new_intra_extension",
+                "description": "new_intra_extension",
+                "policymodel": model
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/", post_data=new_ie, authtoken=True)
+            for key in [u'model', u'id', u'name', u'description']:
+                self.assertIn(key, data)
+            ie_id = data["id"]
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertIn(ie_id, data["intra_extensions"])
+
+            # Get all subjects
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subjects".format(ie_id), authtoken=True)
+            self.assertIn("subjects", data)
+            self.assertIs(type(data["subjects"]), dict)
+
+            # Get all objects
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects".format(ie_id), authtoken=True)
+            self.assertIn("objects", data)
+            self.assertIsInstance(data["objects"], dict)
+
+            # Get all actions
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions".format(ie_id), authtoken=True)
+            self.assertIn("actions", data)
+            self.assertIsInstance(data["actions"], dict)
+
+            # # get current tenant
+            # data = get_url("/v3/OS-MOON/intra_extensions/{}/tenant".format(ie_id), authtoken=True)
+            # self.assertIn("tenant", data)
+            # self.assertIn(type(data["tenant"]), (str, unicode))
+            #
+            # # set current tenant
+            # tenants = get_url("/v3/projects", authtoken=True)["projects"]
+            # post_data = {"tenant_id": ""}
+            # for tenant in tenants:
+            #     if tenant["name"] == "admin":
+            #         post_data = {"tenant_id": tenant["id"]}
+            #         break
+            # data = get_url("/v3/OS-MOON/intra_extensions/{}/tenant".format(ie_id),
+            #                post_data=post_data,
+            #                authtoken=True)
+            # self.assertIn("tenant", data)
+            # self.assertIn(type(data["tenant"]), (str, unicode))
+            # self.assertEqual(data["tenant"], post_data["tenant_id"])
+            #
+            # # check current tenant
+            # data = get_url("/v3/OS-MOON/intra_extensions/{}/tenant".format(ie_id), authtoken=True)
+            # self.assertIn("tenant", data)
+            # self.assertIn(type(data["tenant"]), (str, unicode))
+            # self.assertEqual(data["tenant"], post_data["tenant_id"])
+
+            # Delete the intra_extension
+            data = get_url("/v3/OS-MOON/intra_extensions/{}".format(ie_id), method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertNotIn(ie_id, data["intra_extensions"])
+
+    def test_perimeter_data(self):
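+        """List, add and delete subjects, objects and actions of an intra-extension."""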
+        data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+        self.assertIn("intra_extensions", data)
+        data = get_url("/v3/OS-MOON/authz_policies", authtoken=True)
+        self.assertIn("authz_policies", data)
+        for model in data["authz_policies"]:
+            # Create a new intra_extension
+            new_ie = {
+                "name": "new_intra_extension",
+                "description": "new_intra_extension",
+                "policymodel": model
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/", post_data=new_ie, authtoken=True)
+            for key in [u'model', u'id', u'name', u'description']:
+                self.assertIn(key, data)
+            ie_id = data["id"]
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertIn(ie_id, data["intra_extensions"])
+
+            # Get all subjects
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subjects".format(ie_id), authtoken=True)
+            self.assertIn("subjects", data)
+            self.assertIs(type(data["subjects"]), dict)
+            self.assertTrue(len(data["subjects"]) > 0)
+
+            # Add a new subject
+            users = get_url("/v3/users", authtoken=True)["users"]
+            demo_user_uuid = None
+            for user in users:
+                if user["name"] == "demo":
+                    demo_user_uuid = user["id"]
+                    break
+                # if user demo is absent, fall back to the admin user
+                if user["name"] == "admin":
+                    demo_user_uuid = user["id"]
+            post_data = {"subject_id": demo_user_uuid}
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subjects".format(ie_id), post_data=post_data, authtoken=True)
+            self.assertIn("subject", data)
+            self.assertIs(type(data["subject"]), dict)
+            self.assertEqual(post_data["subject_id"], data["subject"]["uuid"])
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subjects".format(ie_id), authtoken=True)
+            self.assertIn("subjects", data)
+            self.assertIsInstance(data["subjects"], dict)
+            self.assertIn(post_data["subject_id"], data["subjects"])
+            # delete the previous subject
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subjects/{}".format(ie_id, post_data["subject_id"]),
+                           method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subjects".format(ie_id), authtoken=True)
+            self.assertIn("subjects", data)
+            self.assertIsInstance(data["subjects"], dict)
+            self.assertNotIn(post_data["subject_id"], data["subjects"])
+
+            # Get all objects
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects".format(ie_id), authtoken=True)
+            self.assertIn("objects", data)
+            self.assertIs(type(data["objects"]), dict)
+            self.assertTrue(len(data["objects"]) > 0)
+
+            # Add a new object
+            post_data = {"object_id": "my_new_object"}
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects".format(ie_id), post_data=post_data, authtoken=True)
+            self.assertIn("object", data)
+            self.assertIsInstance(data["object"], dict)
+            self.assertEqual(post_data["object_id"], data["object"]["name"])
+            object_id = data["object"]["uuid"]
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects".format(ie_id), authtoken=True)
+            self.assertIn("objects", data)
+            self.assertIsInstance(data["objects"], dict)
+            self.assertIn(post_data["object_id"], data["objects"].values())
+
+            # delete the previous object
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects/{}".format(ie_id, object_id),
+                           method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects".format(ie_id), authtoken=True)
+            self.assertIn("objects", data)
+            self.assertIsInstance(data["objects"], dict)
+            self.assertNotIn(post_data["object_id"], data["objects"].values())
+
+            # Get all actions
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions".format(ie_id), authtoken=True)
+            self.assertIn("actions", data)
+            self.assertIs(type(data["actions"]), dict)
+            self.assertTrue(len(data["actions"]) > 0)
+
+            # Add a new action
+            post_data = {"action_id": "create2"}
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions".format(ie_id), post_data=post_data, authtoken=True)
+            self.assertIn("action", data)
+            self.assertIsInstance(data["action"], dict)
+            self.assertEqual(post_data["action_id"], data["action"]["name"])
+            action_id = data["action"]["uuid"]
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions".format(ie_id), authtoken=True)
+            self.assertIn("actions", data)
+            self.assertIsInstance(data["actions"], dict)
+            self.assertIn(post_data["action_id"], data["actions"].values())
+
+            # delete the previous action
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions/{}".format(ie_id, action_id),
+                           method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions".format(ie_id), authtoken=True)
+            self.assertIn("actions", data)
+            self.assertIsInstance(data["actions"], dict)
+            self.assertNotIn(post_data["action_id"], data["actions"])
+
+            # Delete the intra_extension
+            data = get_url("/v3/OS-MOON/intra_extensions/{}".format(ie_id), method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertNotIn(ie_id, data["intra_extensions"])
+
+    def test_assignments_data(self):
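+        """Add and delete category assignments for subjects, objects and actions."""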
+        data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+        self.assertIn("intra_extensions", data)
+        data = get_url("/v3/OS-MOON/authz_policies", authtoken=True)
+        self.assertIn("authz_policies", data)
+        for model in data["authz_policies"]:
+            # Create a new intra_extension
+            new_ie = {
+                "name": "new_intra_extension",
+                "description": "new_intra_extension",
+                "policymodel": model
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/", post_data=new_ie, authtoken=True)
+            for key in [u'model', u'id', u'name', u'description']:
+                self.assertIn(key, data)
+            ie_id = data["id"]
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertIn(ie_id, data["intra_extensions"])
+
+            # Get all subject_assignments
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_assignments/{}".format(
+                ie_id, get_keystone_user(intra_extension_uuid=ie_id)), authtoken=True)
+            self.assertIn("subject_category_assignments", data)
+            self.assertIs(type(data["subject_category_assignments"]), dict)
+
+            # Add subject_assignments
+            # get one subject
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subjects".format(ie_id), authtoken=True)
+            self.assertIn("subjects", data)
+            self.assertIs(type(data["subjects"]), dict)
+            # subject_id = data["subjects"].keys()[0]
+            subject_id = get_keystone_user()
+            # get one subject category
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories".format(ie_id), authtoken=True)
+            self.assertIn("subject_categories", data)
+            self.assertIs(type(data["subject_categories"]), dict)
+            subject_category_id = data["subject_categories"].keys()[0]
+            # get all subject category scope
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_category_scope/{}".format(
+                ie_id, subject_category_id), authtoken=True)
+            self.assertIn("subject_category_scope", data)
+            self.assertIs(type(data["subject_category_scope"]), dict)
+            subject_category_scope_id = data["subject_category_scope"][subject_category_id].keys()[0]
+            post_data = {
+                "subject_id": subject_id,
+                "subject_category": subject_category_id,
+                "subject_category_scope": subject_category_scope_id
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_assignments".format(ie_id), post_data=post_data, authtoken=True)
+            self.assertIn("subject_category_assignments", data)
+            self.assertIs(type(data["subject_category_assignments"]), dict)
+            self.assertIn(post_data["subject_category"], data["subject_category_assignments"][subject_id])
+            self.assertIn(post_data["subject_category"], data["subject_category_assignments"][subject_id])
+            self.assertIn(post_data["subject_category_scope"],
+                          data["subject_category_assignments"][subject_id][post_data["subject_category"]])
+            # data = get_url("/v3/OS-MOON/intra_extensions/{}/subjects".format(ie_id), authtoken=True)
+            # self.assertIn("subjects", data)
+            # self.assertIsInstance(data["subjects"], dict)
+            # self.assertIn(post_data["subject_id"], data["subjects"])
+
+            # delete the previous subject assignment
+            get_url("/v3/OS-MOON/intra_extensions/{}/subject_assignments/{}/{}/{}".format(
+                ie_id,
+                post_data["subject_id"],
+                post_data["subject_category"],
+                post_data["subject_category_scope"],
+                ),
+                method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_assignments/{}".format(
+                ie_id, get_keystone_user()), authtoken=True)
+            self.assertIn("subject_category_assignments", data)
+            self.assertIs(type(data["subject_category_assignments"]), dict)
+            if post_data["subject_category"] in data["subject_category_assignments"][subject_id]:
+                if post_data["subject_category"] in data["subject_category_assignments"][subject_id]:
+                    self.assertNotIn(post_data["subject_category_scope"],
+                          data["subject_category_assignments"][subject_id][post_data["subject_category"]])
+
+            # Get all object_assignments
+
+            # get one object
+            post_data = {"object_id": "my_new_object"}
+            new_object = get_url("/v3/OS-MOON/intra_extensions/{}/objects".format(ie_id), post_data=post_data, authtoken=True)
+            object_id = new_object["object"]["uuid"]
+
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_assignments/{}".format(
+                ie_id, object_id), authtoken=True)
+            self.assertIn("object_category_assignments", data)
+            self.assertIsInstance(data["object_category_assignments"], dict)
+
+            # Add object_assignments
+            # get one object category
+            post_data = {"object_category_id": uuid4().hex}
+            object_category = get_url("/v3/OS-MOON/intra_extensions/{}/object_categories".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            object_category_id = object_category["object_category"]["uuid"]
+            # get all object category scope
+            post_data = {
+                "object_category_id": object_category_id,
+                "object_category_scope_id": uuid4().hex
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_category_scope".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            object_category_scope_id = data["object_category_scope"]["uuid"]
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_category_scope/{}".format(
+                ie_id, object_category_id), authtoken=True)
+            self.assertIn("object_category_scope", data)
+            self.assertIs(type(data["object_category_scope"]), dict)
+            post_data = {
+                "object_id": object_id,
+                "object_category": object_category_id,
+                "object_category_scope": object_category_scope_id
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_assignments".format(ie_id), post_data=post_data, authtoken=True)
+            self.assertIn("object_category_assignments", data)
+            self.assertIs(type(data["object_category_assignments"]), dict)
+            self.assertIn(post_data["object_id"], data["object_category_assignments"])
+            self.assertIn(post_data["object_category"], data["object_category_assignments"][post_data["object_id"]])
+            self.assertIn(post_data["object_category_scope"],
+                          data["object_category_assignments"][post_data["object_id"]][post_data["object_category"]])
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects".format(ie_id), authtoken=True)
+            self.assertIn("objects", data)
+            self.assertIsInstance(data["objects"], dict)
+            self.assertIn(post_data["object_id"], data["objects"])
+            # delete the previous object
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects/{}".format(ie_id, post_data["object_id"]),
+                           method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/objects".format(ie_id), authtoken=True)
+            self.assertIn("objects", data)
+            self.assertIsInstance(data["objects"], dict)
+            self.assertNotIn(post_data["object_id"], data["objects"])
+
+            # Get all actions_assignments
+
+            # get one action
+            post_data = {"action_id": "my_new_action"}
+            new_object = get_url("/v3/OS-MOON/intra_extensions/{}/actions".format(ie_id), post_data=post_data, authtoken=True)
+            action_id = new_object["action"]["uuid"]
+
+            post_data = {"action_category_id": uuid4().hex}
+            action_category = get_url("/v3/OS-MOON/intra_extensions/{}/action_categories".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            action_category_id = action_category["action_category"]["uuid"]
+
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_assignments/{}".format(
+                ie_id, action_id), authtoken=True)
+            self.assertIn("action_category_assignments", data)
+            self.assertIsInstance(data["action_category_assignments"], dict)
+
+            # Add action_assignments
+            # get one action category
+            # data = get_url("/v3/OS-MOON/intra_extensions/{}/action_categories".format(ie_id), authtoken=True)
+            # self.assertIn("action_categories", data)
+            # self.assertIs(type(data["action_categories"]), dict)
+            # action_category_id = data["action_categories"][0]
+            # get all action category scope
+            post_data = {
+                "action_category_id": action_category_id,
+                "action_category_scope_id": uuid4().hex
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_category_scope".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            action_category_scope_id = data["action_category_scope"]["uuid"]
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_category_scope/{}".format(
+                ie_id, action_category_id), authtoken=True)
+            self.assertIn("action_category_scope", data)
+            self.assertIs(type(data["action_category_scope"]), dict)
+            # action_category_scope_id = data["action_category_scope"][action_category_id].keys()[0]
+            post_data = {
+                "action_id": action_id,
+                "action_category": action_category_id,
+                "action_category_scope": action_category_scope_id
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_assignments".format(ie_id), post_data=post_data, authtoken=True)
+            self.assertIn("action_category_assignments", data)
+            self.assertIs(type(data["action_category_assignments"]), dict)
+            self.assertIn(post_data["action_id"], data["action_category_assignments"])
+            self.assertIn(post_data["action_category"], data["action_category_assignments"][post_data["action_id"]])
+            self.assertIn(post_data["action_category_scope"],
+                          data["action_category_assignments"][post_data["action_id"]][post_data["action_category"]])
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions".format(ie_id), authtoken=True)
+            self.assertIn("actions", data)
+            self.assertIsInstance(data["actions"], dict)
+            self.assertIn(post_data["action_id"], data["actions"])
+            # delete the previous action
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions/{}".format(ie_id, post_data["action_id"]),
+                           method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/actions".format(ie_id), authtoken=True)
+            self.assertIn("actions", data)
+            self.assertIsInstance(data["actions"], dict)
+            self.assertNotIn(post_data["action_id"], data["actions"])
+
+            # Delete the intra_extension
+            get_url("/v3/OS-MOON/intra_extensions/{}".format(ie_id), method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertNotIn(ie_id, data["intra_extensions"])
+
+    def test_metadata_data(self):
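+        """Add and delete subject, object and action categories."""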
+        data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+        self.assertIn("intra_extensions", data)
+        data = get_url("/v3/OS-MOON/authz_policies", authtoken=True)
+        self.assertIn("authz_policies", data)
+        for model in data["authz_policies"]:
+            # Create a new intra_extension
+            new_ie = {
+                "name": "new_intra_extension",
+                "description": "new_intra_extension",
+                "policymodel": model
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/", post_data=new_ie, authtoken=True)
+            for key in [u'model', u'id', u'name', u'description']:
+                self.assertIn(key, data)
+            ie_id = data["id"]
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertIn(ie_id, data["intra_extensions"])
+
+            # Get all subject_categories
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories".format(ie_id), authtoken=True)
+            self.assertIn("subject_categories", data)
+            self.assertIs(type(data["subject_categories"]), dict)
+
+            # Add a new subject_category
+            post_data = {"subject_category_id": uuid4().hex}
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            self.assertIn("subject_category", data)
+            self.assertIsInstance(data["subject_category"], dict)
+            self.assertEqual(post_data["subject_category_id"], data["subject_category"]["name"])
+            subject_category_id = data["subject_category"]["uuid"]
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories".format(ie_id), authtoken=True)
+            self.assertIn("subject_categories", data)
+            self.assertIsInstance(data["subject_categories"], dict)
+            self.assertIn(post_data["subject_category_id"], data["subject_categories"].values())
+            # delete the previous subject_category
+            get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories/{}".format(ie_id,
+                                                                                   subject_category_id),
+                    method="DELETE",
+                    authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories".format(ie_id), authtoken=True)
+            self.assertIn("subject_categories", data)
+            self.assertIsInstance(data["subject_categories"], dict)
+            self.assertNotIn(post_data["subject_category_id"], data["subject_categories"].values())
+
+            # Get all object_categories
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_categories".format(ie_id), authtoken=True)
+            self.assertIn("object_categories", data)
+            self.assertIsInstance(data["object_categories"], dict)
+
+            # Add a new object_category
+            post_data = {"object_category_id": uuid4().hex}
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_categories".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            self.assertIn("object_category", data)
+            self.assertIsInstance(data["object_category"], dict)
+            self.assertIn(post_data["object_category_id"], data["object_category"]["name"])
+            object_category_id = data["object_category"]["uuid"]
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_categories".format(ie_id), authtoken=True)
+            self.assertIn("object_categories", data)
+            self.assertIsInstance(data["object_categories"], dict)
+            self.assertIn(post_data["object_category_id"], data["object_categories"].values())
+            # delete the previous object_category
+            get_url("/v3/OS-MOON/intra_extensions/{}/object_categories/{}".format(ie_id,
+                                                                                  object_category_id),
+                    method="DELETE",
+                    authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_categories".format(ie_id), authtoken=True)
+            self.assertIn("object_categories", data)
+            self.assertIsInstance(data["object_categories"], dict)
+            self.assertNotIn(post_data["object_category_id"], data["object_categories"].values())
+
+            # Get all action_categories
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_categories".format(ie_id), authtoken=True)
+            self.assertIn("action_categories", data)
+            self.assertIsInstance(data["action_categories"], dict)
+
+            # Add a new action_category
+            post_data = {"action_category_id": uuid4().hex}
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_categories".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            self.assertIn("action_category", data)
+            self.assertIsInstance(data["action_category"], dict)
+            self.assertIn(post_data["action_category_id"], data["action_category"]["name"])
+            action_category_id = data["action_category"]["uuid"]
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_categories".format(ie_id), authtoken=True)
+            self.assertIn("action_categories", data)
+            self.assertIsInstance(data["action_categories"], dict)
+            self.assertIn(post_data["action_category_id"], data["action_categories"].values())
+            # delete the previous action_category
+            get_url("/v3/OS-MOON/intra_extensions/{}/action_categories/{}".format(ie_id,
+                                                                                  action_category_id),
+                    method="DELETE",
+                    authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_categories".format(ie_id), authtoken=True)
+            self.assertIn("action_categories", data)
+            self.assertIsInstance(data["action_categories"], dict)
+            self.assertNotIn(post_data["action_category_id"], data["action_categories"].values())
+
+            # Delete the intra_extension
+            get_url("/v3/OS-MOON/intra_extensions/{}".format(ie_id), method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertNotIn(ie_id, data["intra_extensions"])
+
+    def test_scope_data(self):
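+        """Add and delete scope values for subject, object and action categories."""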
+        data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+        self.assertIn("intra_extensions", data)
+        data = get_url("/v3/OS-MOON/authz_policies", authtoken=True)
+        self.assertIn("authz_policies", data)
+        for model in data["authz_policies"]:
+            # Create a new intra_extension
+            new_ie = {
+                "name": "new_intra_extension",
+                "description": "new_intra_extension",
+                "policymodel": model
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/", post_data=new_ie, authtoken=True)
+            for key in [u'model', u'id', u'name', u'description']:
+                self.assertIn(key, data)
+            ie_id = data["id"]
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertIn(ie_id, data["intra_extensions"])
+
+            # Get all subject_category_scope
+            categories = get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories".format(ie_id), authtoken=True)
+            for category in categories["subject_categories"]:
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_category_scope/{}".format(
+                    ie_id, category), authtoken=True)
+                self.assertIn("subject_category_scope", data)
+                self.assertIs(type(data["subject_category_scope"]), dict)
+
+                # Add a new subject_category_scope
+                post_data = {
+                    "subject_category_id": category,
+                    "subject_category_scope_id": uuid4().hex
+                }
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_category_scope".format(ie_id),
+                               post_data=post_data,
+                               authtoken=True)
+                self.assertIn("subject_category_scope", data)
+                self.assertIsInstance(data["subject_category_scope"], dict)
+                self.assertEqual(post_data["subject_category_scope_id"], data["subject_category_scope"]["name"])
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_category_scope/{}".format(
+                    ie_id, category), authtoken=True)
+                self.assertIn("subject_category_scope", data)
+                self.assertIsInstance(data["subject_category_scope"], dict)
+                self.assertIn(post_data["subject_category_id"], data["subject_category_scope"])
+                self.assertIn(post_data["subject_category_scope_id"],
+                              data["subject_category_scope"][category].values())
+                # delete the previous subject_category_scope
+                get_url("/v3/OS-MOON/intra_extensions/{}/subject_category_scope/{}/{}".format(
+                    ie_id,
+                    post_data["subject_category_id"],
+                    post_data["subject_category_scope_id"]),
+                    method="DELETE",
+                    authtoken=True)
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_category_scope/{}".format(
+                    ie_id, category), authtoken=True)
+                self.assertIn("subject_category_scope", data)
+                self.assertIsInstance(data["subject_category_scope"], dict)
+                self.assertIn(post_data["subject_category_id"], data["subject_category_scope"])
+                self.assertNotIn(post_data["subject_category_scope_id"],
+                                 data["subject_category_scope"][post_data["subject_category_id"]])
+
+            # Get all object_category_scope
+            # get object_categories
+            categories = get_url("/v3/OS-MOON/intra_extensions/{}/object_categories".format(ie_id), authtoken=True)
+            for category in categories["object_categories"]:
+                post_data = {
+                    "object_category_id": category,
+                    "object_category_scope_id": uuid4().hex
+                }
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/object_category_scope".format(ie_id),
+                               post_data=post_data,
+                               authtoken=True)
+                self.assertIn("object_category_scope", data)
+                self.assertIsInstance(data["object_category_scope"], dict)
+                self.assertEqual(post_data["object_category_scope_id"], data["object_category_scope"]["name"])
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/object_category_scope/{}".format(
+                    ie_id, category), authtoken=True)
+                self.assertIn("object_category_scope", data)
+                self.assertIsInstance(data["object_category_scope"], dict)
+                self.assertIn(post_data["object_category_id"], data["object_category_scope"])
+                self.assertIn(post_data["object_category_scope_id"],
+                              data["object_category_scope"][category].values())
+                # delete the previous object_category_scope
+                get_url("/v3/OS-MOON/intra_extensions/{}/object_category_scope/{}/{}".format(
+                    ie_id,
+                    post_data["object_category_id"],
+                    post_data["object_category_scope_id"]),
+                    method="DELETE",
+                    authtoken=True)
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/object_category_scope/{}".format(
+                    ie_id, category), authtoken=True)
+                self.assertIn("object_category_scope", data)
+                self.assertIsInstance(data["object_category_scope"], dict)
+                self.assertIn(post_data["object_category_id"], data["object_category_scope"])
+                self.assertNotIn(post_data["object_category_scope_id"],
+                                 data["object_category_scope"][post_data["object_category_id"]])
+
+            # Get all action_category_scope
+            categories = get_url("/v3/OS-MOON/intra_extensions/{}/action_categories".format(ie_id), authtoken=True)
+            print(categories)
+            for category in categories["action_categories"]:
+                print(category)
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/action_category_scope/{}".format(
+                    ie_id, category), authtoken=True)
+                self.assertIn("action_category_scope", data)
+                self.assertIsInstance(data["action_category_scope"], dict)
+
+                # Add a new action_category_scope
+                post_data = {
+                    "action_category_id": category,
+                    "action_category_scope_id": uuid4().hex
+                }
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/action_category_scope".format(ie_id),
+                               post_data=post_data,
+                               authtoken=True)
+                self.assertIn("action_category_scope", data)
+                self.assertIsInstance(data["action_category_scope"], dict)
+                self.assertEqual(post_data["action_category_scope_id"], data["action_category_scope"]["name"])
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/action_category_scope/{}".format(
+                    ie_id, category), authtoken=True)
+                self.assertIn("action_category_scope", data)
+                self.assertIsInstance(data["action_category_scope"], dict)
+                self.assertIn(post_data["action_category_id"], data["action_category_scope"])
+                self.assertIn(post_data["action_category_scope_id"],
+                              data["action_category_scope"][category].values())
+                # delete the previous action_category_scope
+                get_url("/v3/OS-MOON/intra_extensions/{}/action_category_scope/{}/{}".format(
+                    ie_id,
+                    post_data["action_category_id"],
+                    post_data["action_category_scope_id"]),
+                    method="DELETE",
+                    authtoken=True)
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/action_category_scope/{}".format(
+                    ie_id, category), authtoken=True)
+                self.assertIn("action_category_scope", data)
+                self.assertIsInstance(data["action_category_scope"], dict)
+                self.assertIn(post_data["action_category_id"], data["action_category_scope"])
+                self.assertNotIn(post_data["action_category_scope_id"],
+                                 data["action_category_scope"][post_data["action_category_id"]])
+
+            # Delete the intra_extension
+            get_url("/v3/OS-MOON/intra_extensions/{}".format(ie_id), method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertNotIn(ie_id, data["intra_extensions"])
+
+    def test_metarule_data(self):
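+        """Switch the aggregation algorithm and extend the sub-meta-rule with new categories."""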
+        data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+        self.assertIn("intra_extensions", data)
+        data = get_url("/v3/OS-MOON/authz_policies", authtoken=True)
+        self.assertIn("authz_policies", data)
+        for model in data["authz_policies"]:
+            # Create a new intra_extension
+            new_ie = {
+                "name": "new_intra_extension",
+                "description": "new_intra_extension",
+                "policymodel": model
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/", post_data=new_ie, authtoken=True)
+            for key in [u'model', u'id', u'name', u'description']:
+                self.assertIn(key, data)
+            ie_id = data["id"]
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertIn(ie_id, data["intra_extensions"])
+
+            # Get all aggregation_algorithms
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/aggregation_algorithms".format(ie_id), authtoken=True)
+            self.assertIn("aggregation_algorithms", data)
+            self.assertIs(type(data["aggregation_algorithms"]), list)
+            aggregation_algorithms = data["aggregation_algorithms"]
+
+            # Get all sub_meta_rule_relations
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/sub_meta_rule_relations".format(ie_id), authtoken=True)
+            self.assertIn("sub_meta_rule_relations", data)
+            self.assertIs(type(data["sub_meta_rule_relations"]), list)
+            sub_meta_rule_relations = data["sub_meta_rule_relations"]
+
+            # Get current aggregation_algorithm
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/aggregation_algorithm".format(ie_id), authtoken=True)
+            self.assertIn("aggregation", data)
+            self.assertIn(type(data["aggregation"]), (str, unicode))
+            aggregation_algorithm = data["aggregation"]
+
+            # Set current aggregation_algorithm
+            post_data = {"aggregation_algorithm": ""}
+            for _algo in aggregation_algorithms:
+                if _algo != aggregation_algorithm:
+                    post_data = {"aggregation_algorithm": _algo}
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/aggregation_algorithm".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            self.assertIn("aggregation", data)
+            self.assertIn(type(data["aggregation"]), (str, unicode))
+            self.assertEqual(post_data["aggregation_algorithm"], data["aggregation"])
+            new_aggregation_algorithm = data["aggregation"]
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/aggregation_algorithm".format(ie_id), authtoken=True)
+            self.assertIn("aggregation", data)
+            self.assertIn(type(data["aggregation"]), (str, unicode))
+            self.assertEqual(post_data["aggregation_algorithm"], new_aggregation_algorithm)
+            # Get back to the old value
+            post_data = {"aggregation_algorithm": aggregation_algorithm}
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/aggregation_algorithm".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            self.assertIn("aggregation", data)
+            self.assertIn(type(data["aggregation"]), (str, unicode))
+            self.assertEqual(post_data["aggregation_algorithm"], aggregation_algorithm)
+
+            # Get current sub_meta_rule
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/sub_meta_rule".format(ie_id), authtoken=True)
+            self.assertIn("sub_meta_rules", data)
+            self.assertIs(type(data["sub_meta_rules"]), dict)
+            self.assertGreater(len(data["sub_meta_rules"].keys()), 0)
+            relation = data["sub_meta_rules"].keys()[0]
+            new_relation = ""
+            self.assertIn(relation, sub_meta_rule_relations)
+            sub_meta_rule = data["sub_meta_rules"]
+            post_data = dict()
+            for _relation in sub_meta_rule_relations:
+                if _relation != data["sub_meta_rules"].keys()[0]:
+                    post_data[_relation] = copy.deepcopy(sub_meta_rule[relation])
+                    post_data[_relation]["relation"] = _relation
+                    new_relation = _relation
+                    break
+            # Add a new subject category
+            subject_category = uuid4().hex
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories".format(ie_id), 
+                           post_data={"subject_category_id": subject_category},
+                           authtoken=True)
+            self.assertIn("subject_category", data)
+            self.assertIsInstance(data["subject_category"], dict)
+            self.assertIn(subject_category, data["subject_category"].values())
+            subject_category_id = data["subject_category"]['uuid']
+            # Add a new object category
+            object_category = uuid4().hex
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_categories".format(ie_id), 
+                           post_data={"object_category_id": object_category},
+                           authtoken=True)
+            self.assertIn("object_category", data)
+            self.assertIsInstance(data["object_category"], dict)
+            self.assertIn(object_category, data["object_category"].values())
+            object_category_id = data["object_category"]['uuid']
+            # Add a new action category
+            action_category = uuid4().hex
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/action_categories".format(ie_id), 
+                           post_data={"action_category_id": action_category},
+                           authtoken=True)
+            self.assertIn("action_category", data)
+            self.assertIsInstance(data["action_category"], dict)
+            self.assertIn(action_category, data["action_category"].values())
+            action_category_id = data["action_category"]['uuid']
+            # Modify the post_data to add new categories
+            post_data[new_relation]["subject_categories"].append(subject_category_id)
+            post_data[new_relation]["object_categories"].append(object_category_id)
+            post_data[new_relation]["action_categories"].append(action_category_id)
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/sub_meta_rule".format(ie_id),
+                           post_data=post_data,
+                           authtoken=True)
+            self.assertIn("sub_meta_rules", data)
+            self.assertIs(type(data["sub_meta_rules"]), dict)
+            self.assertGreater(len(data["sub_meta_rules"].keys()), 0)
+            self.assertEqual(new_relation, data["sub_meta_rules"].keys()[0])
+            self.assertIn(subject_category_id, data["sub_meta_rules"][new_relation]["subject_categories"])
+            self.assertIn(object_category_id, data["sub_meta_rules"][new_relation]["object_categories"])
+            self.assertIn(action_category_id, data["sub_meta_rules"][new_relation]["action_categories"])
+            self.assertEqual(new_relation, data["sub_meta_rules"][new_relation]["relation"])
+
+            # Delete the intra_extension
+            data = get_url("/v3/OS-MOON/intra_extensions/{}".format(ie_id), method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertNotIn(ie_id, data["intra_extensions"])
+
+    def test_rules_data(self):
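+        """List the sub-rules, add one under relation_super, then delete it."""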
+        data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+        self.assertIn("intra_extensions", data)
+        data = get_url("/v3/OS-MOON/authz_policies", authtoken=True)
+        self.assertIn("authz_policies", data)
+        for model in data["authz_policies"]:
+            # Create a new intra_extension
+            print("=====> {}".format(model))
+            new_ie = {
+                "name": "new_intra_extension",
+                "description": "new_intra_extension",
+                "policymodel": model
+            }
+            data = get_url("/v3/OS-MOON/intra_extensions/", post_data=new_ie, authtoken=True)
+            for key in [u'model', u'id', u'name', u'description']:
+                self.assertIn(key, data)
+            ie_id = data["id"]
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertIn(ie_id, data["intra_extensions"])
+
+            # Get all sub_meta_rule_relations
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/sub_meta_rule_relations".format(ie_id), authtoken=True)
+            self.assertIn("sub_meta_rule_relations", data)
+            self.assertIs(type(data["sub_meta_rule_relations"]), list)
+            sub_meta_rule_relations = data["sub_meta_rule_relations"]
+
+            # Get current sub_meta_rule
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/sub_meta_rule".format(ie_id), authtoken=True)
+            self.assertIn("sub_meta_rules", data)
+            self.assertIs(type(data["sub_meta_rules"]), dict)
+            self.assertGreater(len(data["sub_meta_rules"].keys()), 0)
+            relation = data["sub_meta_rules"].keys()[0]
+            self.assertIn(relation, sub_meta_rule_relations)
+            sub_meta_rule = data["sub_meta_rules"]
+            sub_meta_rule_length = dict()
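+            # one scope value per category plus the trailing boolean of each sub-rule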
+            sub_meta_rule_length[relation] = len(data["sub_meta_rules"][relation]["subject_categories"]) + \
+                len(data["sub_meta_rules"][relation]["object_categories"]) + \
+                len(data["sub_meta_rules"][relation]["action_categories"]) +1
+
+            # Get all rules
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/sub_rules".format(ie_id), authtoken=True)
+            self.assertIn("rules", data)
+            self.assertIs(type(data["rules"]), dict)
+            length = dict()
+            for key in data["rules"]:
+                self.assertIn(key, sub_meta_rule_relations)
+                self.assertGreater(len(data["rules"][key]), 0)
+                self.assertIs(type(data["rules"][key]), list)
+                for sub_rule in data["rules"][key]:
+                    self.assertEqual(len(sub_rule), sub_meta_rule_length[key])
+                    length[key] = len(data["rules"][key])
+
+            # Get one value of subject category scope
+            # FIXME: a better test would be to add a new value in scope and then add it to a new sub-rule
+            categories = get_url("/v3/OS-MOON/intra_extensions/{}/subject_categories".format(ie_id),
+                                 authtoken=True)["subject_categories"].keys()
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/subject_category_scope/{}".format(
+                ie_id, categories[0]), authtoken=True)
+            self.assertIn("subject_category_scope", data)
+            self.assertIs(type(data["subject_category_scope"]), dict)
+            subject_category = categories[0]
+            subject_value = data["subject_category_scope"][subject_category].keys()[0]
+            # Get one value of object category scope
+            # FIXME: a better test would be to add a new value in scope and then add it to a new sub-rule
+            categories = get_url("/v3/OS-MOON/intra_extensions/{}/object_categories".format(ie_id),
+                                 authtoken=True)["object_categories"].keys()
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/object_category_scope/{}".format(
+                ie_id, categories[0]), authtoken=True)
+            self.assertIn("object_category_scope", data)
+            self.assertIs(type(data["object_category_scope"]), dict)
+            object_category = categories[0]
+            object_value = data["object_category_scope"][object_category].keys()[0]
+            # Get one or more values in action category scope
+            _sub_meta_action_value = list()
+            for _sub_meta_cat in sub_meta_rule[relation]["action_categories"]:
+                data = get_url("/v3/OS-MOON/intra_extensions/{}/action_category_scope/{}".format(
+                               ie_id, _sub_meta_cat), authtoken=True)
+                action_value = data["action_category_scope"][_sub_meta_cat].keys()[0]
+                _sub_meta_action_value.append(action_value)
+            _sub_meta_rules = list()
+            _sub_meta_rules.append(subject_value)
+            _sub_meta_rules.extend(_sub_meta_action_value)
+            _sub_meta_rules.append(object_value)
+            # append True because the sub-rule needs a trailing boolean marking it as a positive or a negative rule
+            _sub_meta_rules.append(True)
+            post_data = {
+                "rule": _sub_meta_rules,
+                "relation": "relation_super"
+            }
+            # Add a new sub-rule
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/sub_rules".format(ie_id),
+                           post_data=post_data, authtoken=True)
+            self.assertIn("rules", data)
+            self.assertIs(type(data["rules"]), dict)
+            for key in data["rules"]:
+                self.assertIn(key, sub_meta_rule_relations)
+                self.assertGreater(len(data["rules"][key]), 0)
+                for sub_rule in data["rules"][key]:
+                    self.assertEqual(len(sub_rule), sub_meta_rule_length[key])
+                    if key == "relation_super":
+                        self.assertEqual(len(data["rules"][key]), length[key]+1)
+                    else:
+                        self.assertEqual(len(data["rules"][key]), length[key])
+
+            # Delete the new sub-rule
+            data = get_url("/v3/OS-MOON/intra_extensions/{}/sub_rules/{rel}/{rule}".format(
+                ie_id,
+                rel=post_data["relation"],
+                rule="+".join(map(lambda x: str(x), post_data["rule"]))),
+                method="DELETE", authtoken=True)
+            self.assertIn("rules", data)
+            self.assertIs(type(data["rules"]), dict)
+            for key in data["rules"]:
+                self.assertIn(key, sub_meta_rule_relations)
+                self.assertGreater(len(data["rules"][key]), 0)
+                self.assertEqual(len(data["rules"][key]), length[key])
+
+            # Delete the intra_extension
+            data = get_url("/v3/OS-MOON/intra_extensions/{}".format(ie_id), method="DELETE", authtoken=True)
+            data = get_url("/v3/OS-MOON/intra_extensions", authtoken=True)
+            self.assertNotIn(ie_id, data["intra_extensions"])
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/keystone-moon/keystone/tests/moon/func/test_func_api_log.py b/keystone-moon/keystone/tests/moon/func/test_func_api_log.py
new file mode 100644 (file)
index 0000000..f081aef
--- /dev/null
@@ -0,0 +1,148 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import unittest
+import json
+import httplib
+import time
+from uuid import uuid4
+import copy
+
+CREDENTIALS = {
+    "host": "127.0.0.1",
+    "port": "35357",
+    "login": "admin",
+    "password": "nomoresecrete",
+    "tenant_name": "demo",
+    "sessionid": "kxb50d9uusiywfcs2fiidmu1j5nsyckr",
+    "csrftoken": "",
+    "x-subject-token": ""
+}
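+# These values assume a local Keystone admin endpoint (127.0.0.1:35357)
+# with an "admin" user and a "demo" tenant; adjust them to the deployment
+# under test. "sessionid" is only a seed value: get_url() replaces it
+# whenever a crsftoken is passed.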
+
+
+def get_url(url, post_data=None, delete_data=None, crsftoken=None, method="GET", authtoken=None):
+    # MOON_SERVER_IP["URL"] = url
+    # _url = "http://{HOST}:{PORT}".format(**MOON_SERVER_IP)
+    if post_data:
+        method = "POST"
+    if delete_data:
+        method = "DELETE"
+    # print("\033[32m{} {}\033[m".format(method, url))
+    conn = httplib.HTTPConnection(CREDENTIALS["host"], CREDENTIALS["port"])
+    headers = {
+        "Content-type": "application/x-www-form-urlencoded",
+        # "Accept": "text/plain",
+        "Accept": "text/plain,text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+        'Cookie': 'sessionid={}'.format(CREDENTIALS["sessionid"]),
+    }
+    if crsftoken:
+        headers["Cookie"] = "csrftoken={}; sessionid={}; NG_TRANSLATE_LANG_KEY:\"en\"".format(crsftoken, CREDENTIALS["sessionid"])
+        CREDENTIALS["crsftoken"] = crsftoken
+    if authtoken:
+        headers["X-Auth-Token"] = CREDENTIALS["x-subject-token"]
+    if post_data:
+        method = "POST"
+        headers["Content-type"] = "application/json"
+        if crsftoken:
+            post_data = "&".join(map(lambda x: "=".join(x), post_data))
+        elif "crsftoken" in CREDENTIALS and "sessionid" in CREDENTIALS:
+            post_data = json.dumps(post_data)
+            headers["Cookie"] = "csrftoken={}; sessionid={}; NG_TRANSLATE_LANG_KEY:\"en\"".format(
+                CREDENTIALS["crsftoken"],
+                CREDENTIALS["sessionid"])
+        else:
+            post_data = json.dumps(post_data)
+        # conn.request(method, url, json.dumps(post_data), headers=headers)
+        conn.request(method, url, post_data, headers=headers)
+    elif delete_data:
+        method = "DELETE"
+        conn.request(method, url, json.dumps(delete_data), headers=headers)
+    else:
+        conn.request(method, url, headers=headers)
+    resp = conn.getresponse()
+    headers = resp.getheaders()
+    try:
+        CREDENTIALS["x-subject-token"] = dict(headers)["x-subject-token"]
+    except KeyError:
+        pass
+    if crsftoken:
+        sessionid_start = dict(headers)["set-cookie"].index("sessionid=")+len("sessionid=")
+        sessionid_end = dict(headers)["set-cookie"].index(";", sessionid_start)
+        sessionid = dict(headers)["set-cookie"][sessionid_start:sessionid_end]
+        CREDENTIALS["sessionid"] = sessionid
+    content = resp.read()
+    conn.close()
+    try:
+        return json.loads(content)
+    except ValueError:
+        return {"content": content}
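+
+# Typical uses of get_url() in the tests below (illustrative values only):
+#   get_url("/v3/auth/tokens", post_data=...)    stores the returned
+#       x-subject-token header in CREDENTIALS,
+#   get_url("/v3/OS-MOON/logs", authtoken=True)  replays it as X-Auth-Token,
+#   get_url(url, method="DELETE", authtoken=True) issues a DELETE.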
+
+
+class IntraExtensionsTest(unittest.TestCase):
+
+    TIME_FORMAT = '%Y-%m-%d-%H:%M:%S'
+
+    def setUp(self):
+        post = {
+            "auth": {
+                "identity": {
+                    "methods": [
+                        "password"
+                    ],
+                    "password": {
+                        "user": {
+                            "domain": {
+                                "id": "Default"
+                            },
+                            "name": "admin",
+                            "password": "nomoresecrete"
+                        }
+                    }
+                },
+                "scope": {
+                    "project": {
+                        "domain": {
+                            "id": "Default"
+                        },
+                        "name": "demo"
+                    }
+                }
+            }
+        }
+        data = get_url("/v3/auth/tokens", post_data=post)
+        self.assertIn("token", data)
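+        # get_url() has now cached the x-subject-token issued for
+        # admin/demo, so calls made with authtoken=True are authenticated.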
+
+    def tearDown(self):
+        pass
+
+    def test_get_logs(self):
+        all_data = get_url("/v3/OS-MOON/logs", authtoken=True)
+        self.assertIn("logs", all_data)
+        len_all_data = len(all_data["logs"])
+        # Pick two entries from the middle of the list; the leading
+        # field of each log entry is its timestamp.
+        data_1 = all_data["logs"][len_all_data/2]
+        time_data_1 = data_1.split(" ")[0]
+        data_2 = all_data["logs"][len_all_data/2+10]
+        time_data_2 = data_2.split(" ")[0]
+        data = get_url("/v3/OS-MOON/logs/filter=authz", authtoken=True)
+        self.assertIn("logs", data)
+        self.assertGreater(len_all_data, len(data["logs"]))
+        data = get_url("/v3/OS-MOON/logs/from={}".format(time_data_1), authtoken=True)
+        self.assertIn("logs", data)
+        self.assertGreater(len_all_data, len(data["logs"]))
+        # for _data in data["logs"]:
+        #     self.assertGreater(time.strptime(_data.split(" "), self.TIME_FORMAT),
+        #                        time.strptime(time_data_1, self.TIME_FORMAT))
+        data = get_url("/v3/OS-MOON/logs/from={},to={}".format(time_data_1, time_data_2), authtoken=True)
+        self.assertIn("logs", data)
+        self.assertGreater(len_all_data, len(data["logs"]))
+        self.assertEqual(10, len(data["logs"]))
+        data = get_url("/v3/OS-MOON/logs/event_number=20", authtoken=True)
+        self.assertIn("logs", data)
+        self.assertGreater(len_all_data, len(data["logs"]))
+        self.assertEqual(20, len(data["logs"]))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/keystone-moon/keystone/tests/moon/func/test_func_api_tenant.py b/keystone-moon/keystone/tests/moon/func/test_func_api_tenant.py
new file mode 100644 (file)
index 0000000..c52e068
--- /dev/null
@@ -0,0 +1,154 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+import unittest
+import json
+import httplib
+import time
+from uuid import uuid4
+import copy
+
+CREDENTIALS = {
+    "host": "127.0.0.1",
+    "port": "35357",
+    "login": "admin",
+    "password": "nomoresecrete",
+    "tenant_name": "demo",
+    "sessionid": "kxb50d9uusiywfcs2fiidmu1j5nsyckr",
+    "csrftoken": "",
+    "x-subject-token": ""
+}
+
+
+def get_url(url, post_data=None, delete_data=None, crsftoken=None, method="GET", authtoken=None):
+    # MOON_SERVER_IP["URL"] = url
+    # _url = "http://{HOST}:{PORT}".format(**MOON_SERVER_IP)
+    if post_data:
+        method = "POST"
+    if delete_data:
+        method = "DELETE"
+    # print("\033[32m{} {}\033[m".format(method, url))
+    conn = httplib.HTTPConnection(CREDENTIALS["host"], CREDENTIALS["port"])
+    headers = {
+        "Content-type": "application/x-www-form-urlencoded",
+        # "Accept": "text/plain",
+        "Accept": "text/plain,text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+        'Cookie': 'sessionid={}'.format(CREDENTIALS["sessionid"]),
+    }
+    if crsftoken:
+        headers["Cookie"] = "csrftoken={}; sessionid={}; NG_TRANSLATE_LANG_KEY:\"en\"".format(crsftoken, CREDENTIALS["sessionid"])
+        CREDENTIALS["crsftoken"] = crsftoken
+    if authtoken:
+        headers["X-Auth-Token"] = CREDENTIALS["x-subject-token"]
+    if post_data:
+        method = "POST"
+        headers["Content-type"] = "application/json"
+        if crsftoken:
+            post_data = "&".join(map(lambda x: "=".join(x), post_data))
+        elif "crsftoken" in CREDENTIALS and "sessionid" in CREDENTIALS:
+            post_data = json.dumps(post_data)
+            headers["Cookie"] = "csrftoken={}; sessionid={}; NG_TRANSLATE_LANG_KEY:\"en\"".format(
+                CREDENTIALS["crsftoken"],
+                CREDENTIALS["sessionid"])
+        else:
+            post_data = json.dumps(post_data)
+        # conn.request(method, url, json.dumps(post_data), headers=headers)
+        conn.request(method, url, post_data, headers=headers)
+    elif delete_data:
+        method = "DELETE"
+        conn.request(method, url, json.dumps(delete_data), headers=headers)
+    else:
+        conn.request(method, url, headers=headers)
+    resp = conn.getresponse()
+    headers = resp.getheaders()
+    try:
+        CREDENTIALS["x-subject-token"] = dict(headers)["x-subject-token"]
+    except KeyError:
+        pass
+    if crsftoken:
+        sessionid_start = dict(headers)["set-cookie"].index("sessionid=")+len("sessionid=")
+        sessionid_end = dict(headers)["set-cookie"].index(";", sessionid_start)
+        sessionid = dict(headers)["set-cookie"][sessionid_start:sessionid_end]
+        CREDENTIALS["sessionid"] = sessionid
+    content = resp.read()
+    conn.close()
+    try:
+        return json.loads(content)
+    except ValueError:
+        return {"content": content}
+
+
+class MappingsTest(unittest.TestCase):
+
+    def setUp(self):
+        post = {
+            "auth": {
+                "identity": {
+                    "methods": [
+                        "password"
+                    ],
+                    "password": {
+                        "user": {
+                            "domain": {
+                                "id": "Default"
+                            },
+                            "name": "admin",
+                            "password": "nomoresecrete"
+                        }
+                    }
+                },
+                "scope": {
+                    "project": {
+                        "domain": {
+                            "id": "Default"
+                        },
+                        "name": "demo"
+                    }
+                }
+            }
+        }
+        data = get_url("/v3/auth/tokens", post_data=post)
+        self.assertIn("token", data)
+
+    def tearDown(self):
+        pass
+
+    def test_get_tenants(self):
+        data = get_url("/v3/OS-MOON/tenants", authtoken=True)
+        self.assertIn("tenants", data)
+        self.assertIsInstance(data["tenants"], list)
+        print(data)
+
+    def test_add_delete_mapping(self):
+        data = get_url("/v3/projects", authtoken=True)
+        project_id = None
+        for project in data["projects"]:
+            if project["name"] == "demo":
+                project_id = project["id"]
+        data = get_url("/v3/OS-MOON/tenant",
+                       post_data={
+                           "id": project_id,
+                           "name": "tenant1",
+                           "authz": "intra_extension_uuid1",
+                           "admin": "intra_extension_uuid2"
+                       },
+                       authtoken=True)
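+        # "authz" and "admin" reference the intra-extensions that would
+        # enforce this tenant's authorization and administration policies;
+        # placeholder UUIDs are enough here since only the mapping API is
+        # exercised.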
+        self.assertIn("tenant", data)
+        self.assertIsInstance(data["tenant"], dict)
+        uuid = data["tenant"]["id"]
+        data = get_url("/v3/OS-MOON/tenants", authtoken=True)
+        self.assertIn("tenants", data)
+        self.assertIsInstance(data["tenants"], list)
+        print(data)
+        data = get_url("/v3/OS-MOON/tenant/{}".format(uuid),
+                       method="DELETE",
+                       authtoken=True)
+        data = get_url("/v3/OS-MOON/tenants", authtoken=True)
+        self.assertIn("tenants", data)
+        self.assertIsInstance(data["tenants"], list)
+        print(data)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/keystone-moon/keystone/tests/moon/unit/__init__.py b/keystone-moon/keystone/tests/moon/unit/__init__.py
new file mode 100644 (file)
index 0000000..1b678d5
--- /dev/null
@@ -0,0 +1,4 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py
new file mode 100644 (file)
index 0000000..03ef845
--- /dev/null
@@ -0,0 +1,1229 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+"""Unit tests for core IntraExtensionAdminManager"""
+
+import json
+import os
+import uuid
+from oslo_config import cfg
+from keystone.tests import unit as tests
+from keystone.contrib.moon.core import IntraExtensionAdminManager, IntraExtensionAuthzManager
+from keystone.tests.unit.ksfixtures import database
+from keystone import resource
+from keystone.contrib.moon.exception import *
+from keystone.tests.unit import default_fixtures
+from keystone.contrib.moon.core import LogManager, TenantManager
+
+CONF = cfg.CONF
+
+USER_ADMIN = {
+    'name': 'admin',
+    'domain_id': "default",
+    'password': 'admin'
+}
+
+IE = {
+    "name": "test IE",
+    "policymodel": "policy_rbac_authz",
+    "description": "a simple description."
+}
+
+
+class TestIntraExtensionAdminManager(tests.TestCase):
+
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(TestIntraExtensionAdminManager, self).setUp()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        self.manager = IntraExtensionAdminManager()
+
+    def __get_key_from_value(self, value, values_dict):
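+        # e.g. __get_key_from_value("admin", {"uuid1": "admin"}) -> "uuid1"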
+        return filter(lambda v: v[1] == value, values_dict.iteritems())[0][0]
+
+    def load_extra_backends(self):
+        return {
+            "moonlog_api": LogManager(),
+            "tenant_api": TenantManager(),
+            # "resource_api": resource.Manager(),
+        }
+
+    def config_overrides(self):
+        super(TestIntraExtensionAdminManager, self).config_overrides()
+        self.policy_directory = '../../../examples/moon/policies'
+        self.config_fixture.config(
+            group='moon',
+            intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
+        self.config_fixture.config(
+            group='moon',
+            policy_directory=self.policy_directory)
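+        # The overrides above amount to a keystone.conf fragment such as
+        # (illustrative):
+        #   [moon]
+        #   intraextension_driver = keystone.contrib.moon.backends.sql.IntraExtensionConnector
+        #   policy_directory = ../../../examples/moon/policies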
+
+    def create_intra_extension(self, policy_model="policy_rbac_admin"):
+        # Create the admin user because IntraExtension needs it
+        self.admin = self.identity_api.create_user(USER_ADMIN)
+        IE["policymodel"] = policy_model
+        self.ref = self.manager.load_intra_extension(IE)
+        self.assertIsInstance(self.ref, dict)
+        self.create_tenant(self.ref["id"])
+
+    def create_tenant(self, authz_uuid):
+        tenant = {
+            "id": uuid.uuid4().hex,
+            "name": "TestAuthzIntraExtensionManager",
+            "enabled": True,
+            "description": "",
+            "domain_id": "default"
+        }
+        project = self.resource_api.create_project(tenant["id"], tenant)
+        mapping = self.tenant_api.set_tenant_dict(project["id"], project["name"], authz_uuid, None)
+        self.assertIsInstance(mapping, dict)
+        self.assertIn("authz", mapping)
+        self.assertEqual(mapping["authz"], authz_uuid)
+        return mapping
+
+    def create_user(self, username="TestAdminIntraExtensionManagerUser"):
+        user = {
+            "id": uuid.uuid4().hex,
+            "name": username,
+            "enabled": True,
+            "description": "",
+            "domain_id": "default"
+        }
+        _user = self.identity_api.create_user(user)
+        return _user
+
+    def delete_admin_intra_extension(self):
+        self.manager.delete_intra_extension(self.ref["id"])
+
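+    # The tests below share one lifecycle against the admin manager:
+    # read the current dict, replace it with set_*_dict(), delete one
+    # entry with del_*(), then re-add it with add_*_dict(), checking
+    # the returned structure at every step.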
+    def test_subjects(self):
+        self.create_intra_extension()
+
+        subjects = self.manager.get_subject_dict("admin", self.ref["id"])
+        self.assertIsInstance(subjects, dict)
+        self.assertIn("subjects", subjects)
+        self.assertIn("id", subjects)
+        self.assertIn("intra_extension_uuid", subjects)
+        self.assertEqual(self.ref["id"], subjects["intra_extension_uuid"])
+        self.assertIsInstance(subjects["subjects"], dict)
+
+        new_subject = self.create_user()
+        new_subjects = dict()
+        new_subjects[new_subject["id"]] = new_subject["name"]
+        subjects = self.manager.set_subject_dict("admin", self.ref["id"], new_subjects)
+        self.assertIsInstance(subjects, dict)
+        self.assertIn("subjects", subjects)
+        self.assertIn("id", subjects)
+        self.assertIn("intra_extension_uuid", subjects)
+        self.assertEqual(self.ref["id"], subjects["intra_extension_uuid"])
+        self.assertEqual(subjects["subjects"], new_subjects)
+        self.assertIn(new_subject["id"], subjects["subjects"])
+
+        # Delete the new subject
+        self.manager.del_subject("admin", self.ref["id"], new_subject["id"])
+        subjects = self.manager.get_subject_dict("admin", self.ref["id"])
+        self.assertIsInstance(subjects, dict)
+        self.assertIn("subjects", subjects)
+        self.assertIn("id", subjects)
+        self.assertIn("intra_extension_uuid", subjects)
+        self.assertEqual(self.ref["id"], subjects["intra_extension_uuid"])
+        self.assertNotIn(new_subject["id"], subjects["subjects"])
+
+        # Add a particular subject
+        subjects = self.manager.add_subject_dict("admin", self.ref["id"], new_subject["id"])
+        self.assertIsInstance(subjects, dict)
+        self.assertIn("subject", subjects)
+        self.assertIn("uuid", subjects["subject"])
+        self.assertEqual(new_subject["name"], subjects["subject"]["name"])
+        subjects = self.manager.get_subject_dict("admin", self.ref["id"])
+        self.assertIsInstance(subjects, dict)
+        self.assertIn("subjects", subjects)
+        self.assertIn("id", subjects)
+        self.assertIn("intra_extension_uuid", subjects)
+        self.assertEqual(self.ref["id"], subjects["intra_extension_uuid"])
+        self.assertIn(new_subject["id"], subjects["subjects"])
+
+    def test_objects(self):
+        self.create_intra_extension()
+
+        objects = self.manager.get_object_dict("admin", self.ref["id"])
+        self.assertIsInstance(objects, dict)
+        self.assertIn("objects", objects)
+        self.assertIn("id", objects)
+        self.assertIn("intra_extension_uuid", objects)
+        self.assertEqual(self.ref["id"], objects["intra_extension_uuid"])
+        self.assertIsInstance(objects["objects"], dict)
+
+        new_object = self.create_user()
+        new_objects = dict()
+        new_objects[new_object["id"]] = new_object["name"]
+        objects = self.manager.set_object_dict("admin", self.ref["id"], new_objects)
+        self.assertIsInstance(objects, dict)
+        self.assertIn("objects", objects)
+        self.assertIn("id", objects)
+        self.assertIn("intra_extension_uuid", objects)
+        self.assertEqual(self.ref["id"], objects["intra_extension_uuid"])
+        self.assertEqual(objects["objects"], new_objects)
+        self.assertIn(new_object["id"], objects["objects"])
+
+        # Delete the new object
+        self.manager.del_object("admin", self.ref["id"], new_object["id"])
+        objects = self.manager.get_object_dict("admin", self.ref["id"])
+        self.assertIsInstance(objects, dict)
+        self.assertIn("objects", objects)
+        self.assertIn("id", objects)
+        self.assertIn("intra_extension_uuid", objects)
+        self.assertEqual(self.ref["id"], objects["intra_extension_uuid"])
+        self.assertNotIn(new_object["id"], objects["objects"])
+
+        # Add a particular object
+        objects = self.manager.add_object_dict("admin", self.ref["id"], new_object["name"])
+        self.assertIsInstance(objects, dict)
+        self.assertIn("object", objects)
+        self.assertIn("uuid", objects["object"])
+        self.assertEqual(new_object["name"], objects["object"]["name"])
+        new_object["id"] = objects["object"]["uuid"]
+        objects = self.manager.get_object_dict("admin", self.ref["id"])
+        self.assertIsInstance(objects, dict)
+        self.assertIn("objects", objects)
+        self.assertIn("id", objects)
+        self.assertIn("intra_extension_uuid", objects)
+        self.assertEqual(self.ref["id"], objects["intra_extension_uuid"])
+        self.assertIn(new_object["id"], objects["objects"])
+
+    def test_actions(self):
+        self.create_intra_extension()
+
+        actions = self.manager.get_action_dict("admin", self.ref["id"])
+        self.assertIsInstance(actions, dict)
+        self.assertIn("actions", actions)
+        self.assertIn("id", actions)
+        self.assertIn("intra_extension_uuid", actions)
+        self.assertEqual(self.ref["id"], actions["intra_extension_uuid"])
+        self.assertIsInstance(actions["actions"], dict)
+
+        new_action = self.create_user()
+        new_actions = dict()
+        new_actions[new_action["id"]] = new_action["name"]
+        actions = self.manager.set_action_dict("admin", self.ref["id"], new_actions)
+        self.assertIsInstance(actions, dict)
+        self.assertIn("actions", actions)
+        self.assertIn("id", actions)
+        self.assertIn("intra_extension_uuid", actions)
+        self.assertEqual(self.ref["id"], actions["intra_extension_uuid"])
+        self.assertEqual(actions["actions"], new_actions)
+        self.assertIn(new_action["id"], actions["actions"])
+
+        # Delete the new action
+        self.manager.del_action("admin", self.ref["id"], new_action["id"])
+        actions = self.manager.get_action_dict("admin", self.ref["id"])
+        self.assertIsInstance(actions, dict)
+        self.assertIn("actions", actions)
+        self.assertIn("id", actions)
+        self.assertIn("intra_extension_uuid", actions)
+        self.assertEqual(self.ref["id"], actions["intra_extension_uuid"])
+        self.assertNotIn(new_action["id"], actions["actions"])
+
+        # Add a particular action
+        actions = self.manager.add_action_dict("admin", self.ref["id"], new_action["name"])
+        self.assertIsInstance(actions, dict)
+        self.assertIn("action", actions)
+        self.assertIn("uuid", actions["action"])
+        self.assertEqual(new_action["name"], actions["action"]["name"])
+        new_action["id"] = actions["action"]["uuid"]
+        actions = self.manager.get_action_dict("admin", self.ref["id"])
+        self.assertIsInstance(actions, dict)
+        self.assertIn("actions", actions)
+        self.assertIn("id", actions)
+        self.assertIn("intra_extension_uuid", actions)
+        self.assertEqual(self.ref["id"], actions["intra_extension_uuid"])
+        self.assertIn(new_action["id"], actions["actions"])
+
+    def test_subject_categories(self):
+        self.create_intra_extension()
+
+        subject_categories = self.manager.get_subject_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(subject_categories, dict)
+        self.assertIn("subject_categories", subject_categories)
+        self.assertIn("id", subject_categories)
+        self.assertIn("intra_extension_uuid", subject_categories)
+        self.assertEqual(self.ref["id"], subject_categories["intra_extension_uuid"])
+        self.assertIsInstance(subject_categories["subject_categories"], dict)
+
+        new_subject_category = {"id": uuid.uuid4().hex, "name": "subject_category_test"}
+        new_subject_categories = dict()
+        new_subject_categories[new_subject_category["id"]] = new_subject_category["name"]
+        subject_categories = self.manager.set_subject_category_dict("admin", self.ref["id"], new_subject_categories)
+        self.assertIsInstance(subject_categories, dict)
+        self.assertIn("subject_categories", subject_categories)
+        self.assertIn("id", subject_categories)
+        self.assertIn("intra_extension_uuid", subject_categories)
+        self.assertEqual(self.ref["id"], subject_categories["intra_extension_uuid"])
+        self.assertEqual(subject_categories["subject_categories"], new_subject_categories)
+        self.assertIn(new_subject_category["id"], subject_categories["subject_categories"])
+
+        # Delete the new subject_category
+        self.manager.del_subject_category("admin", self.ref["id"], new_subject_category["id"])
+        subject_categories = self.manager.get_subject_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(subject_categories, dict)
+        self.assertIn("subject_categories", subject_categories)
+        self.assertIn("id", subject_categories)
+        self.assertIn("intra_extension_uuid", subject_categories)
+        self.assertEqual(self.ref["id"], subject_categories["intra_extension_uuid"])
+        self.assertNotIn(new_subject_category["id"], subject_categories["subject_categories"])
+
+        # Add a particular subject_category
+        subject_categories = self.manager.add_subject_category_dict(
+            "admin",
+            self.ref["id"],
+            new_subject_category["name"])
+        self.assertIsInstance(subject_categories, dict)
+        self.assertIn("subject_category", subject_categories)
+        self.assertIn("uuid", subject_categories["subject_category"])
+        self.assertEqual(new_subject_category["name"], subject_categories["subject_category"]["name"])
+        new_subject_category["id"] = subject_categories["subject_category"]["uuid"]
+        subject_categories = self.manager.get_subject_category_dict(
+            "admin",
+            self.ref["id"])
+        self.assertIsInstance(subject_categories, dict)
+        self.assertIn("subject_categories", subject_categories)
+        self.assertIn("id", subject_categories)
+        self.assertIn("intra_extension_uuid", subject_categories)
+        self.assertEqual(self.ref["id"], subject_categories["intra_extension_uuid"])
+        self.assertIn(new_subject_category["id"], subject_categories["subject_categories"])
+
+    def test_object_categories(self):
+        self.create_intra_extension()
+
+        object_categories = self.manager.get_object_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(object_categories, dict)
+        self.assertIn("object_categories", object_categories)
+        self.assertIn("id", object_categories)
+        self.assertIn("intra_extension_uuid", object_categories)
+        self.assertEqual(self.ref["id"], object_categories["intra_extension_uuid"])
+        self.assertIsInstance(object_categories["object_categories"], dict)
+
+        new_object_category = {"id": uuid.uuid4().hex, "name": "object_category_test"}
+        new_object_categories = dict()
+        new_object_categories[new_object_category["id"]] = new_object_category["name"]
+        object_categories = self.manager.set_object_category_dict("admin", self.ref["id"], new_object_categories)
+        self.assertIsInstance(object_categories, dict)
+        self.assertIn("object_categories", object_categories)
+        self.assertIn("id", object_categories)
+        self.assertIn("intra_extension_uuid", object_categories)
+        self.assertEqual(self.ref["id"], object_categories["intra_extension_uuid"])
+        self.assertEqual(object_categories["object_categories"], new_object_categories)
+        self.assertIn(new_object_category["id"], object_categories["object_categories"])
+
+        # Delete the new object_category
+        self.manager.del_object_category("admin", self.ref["id"], new_object_category["id"])
+        object_categories = self.manager.get_object_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(object_categories, dict)
+        self.assertIn("object_categories", object_categories)
+        self.assertIn("id", object_categories)
+        self.assertIn("intra_extension_uuid", object_categories)
+        self.assertEqual(self.ref["id"], object_categories["intra_extension_uuid"])
+        self.assertNotIn(new_object_category["id"], object_categories["object_categories"])
+
+        # Add a particular object_category
+        object_categories = self.manager.add_object_category_dict(
+            "admin",
+            self.ref["id"],
+            new_object_category["name"])
+        self.assertIsInstance(object_categories, dict)
+        self.assertIn("object_category", object_categories)
+        self.assertIn("uuid", object_categories["object_category"])
+        self.assertEqual(new_object_category["name"], object_categories["object_category"]["name"])
+        new_object_category["id"] = object_categories["object_category"]["uuid"]
+        object_categories = self.manager.get_object_category_dict(
+            "admin",
+            self.ref["id"])
+        self.assertIsInstance(object_categories, dict)
+        self.assertIn("object_categories", object_categories)
+        self.assertIn("id", object_categories)
+        self.assertIn("intra_extension_uuid", object_categories)
+        self.assertEqual(self.ref["id"], object_categories["intra_extension_uuid"])
+        self.assertIn(new_object_category["id"], object_categories["object_categories"])
+
+    def test_action_categories(self):
+        self.create_intra_extension()
+
+        action_categories = self.manager.get_action_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(action_categories, dict)
+        self.assertIn("action_categories", action_categories)
+        self.assertIn("id", action_categories)
+        self.assertIn("intra_extension_uuid", action_categories)
+        self.assertEqual(self.ref["id"], action_categories["intra_extension_uuid"])
+        self.assertIsInstance(action_categories["action_categories"], dict)
+
+        new_action_category = {"id": uuid.uuid4().hex, "name": "action_category_test"}
+        new_action_categories = dict()
+        new_action_categories[new_action_category["id"]] = new_action_category["name"]
+        action_categories = self.manager.set_action_category_dict("admin", self.ref["id"], new_action_categories)
+        self.assertIsInstance(action_categories, dict)
+        self.assertIn("action_categories", action_categories)
+        self.assertIn("id", action_categories)
+        self.assertIn("intra_extension_uuid", action_categories)
+        self.assertEqual(self.ref["id"], action_categories["intra_extension_uuid"])
+        self.assertEqual(action_categories["action_categories"], new_action_categories)
+        self.assertIn(new_action_category["id"], action_categories["action_categories"])
+
+        # Delete the new action_category
+        self.manager.del_action_category("admin", self.ref["id"], new_action_category["id"])
+        action_categories = self.manager.get_action_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(action_categories, dict)
+        self.assertIn("action_categories", action_categories)
+        self.assertIn("id", action_categories)
+        self.assertIn("intra_extension_uuid", action_categories)
+        self.assertEqual(self.ref["id"], action_categories["intra_extension_uuid"])
+        self.assertNotIn(new_action_category["id"], action_categories["action_categories"])
+
+        # Add a particular action_category
+        action_categories = self.manager.add_action_category_dict(
+            "admin",
+            self.ref["id"],
+            new_action_category["name"])
+        self.assertIsInstance(action_categories, dict)
+        self.assertIn("action_category", action_categories)
+        self.assertIn("uuid", action_categories["action_category"])
+        self.assertEqual(new_action_category["name"], action_categories["action_category"]["name"])
+        new_action_category["id"] = action_categories["action_category"]["uuid"]
+        action_categories = self.manager.get_action_category_dict(
+            "admin",
+            self.ref["id"])
+        self.assertIsInstance(action_categories, dict)
+        self.assertIn("action_categories", action_categories)
+        self.assertIn("id", action_categories)
+        self.assertIn("intra_extension_uuid", action_categories)
+        self.assertEqual(self.ref["id"], action_categories["intra_extension_uuid"])
+        self.assertIn(new_action_category["id"], action_categories["action_categories"])
+
+    def test_subject_category_scope(self):
+        self.create_intra_extension()
+
+        subject_categories = self.manager.set_subject_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                uuid.uuid4().hex: "admin",
+                uuid.uuid4().hex: "dev",
+            }
+        )
+
+        for subject_category in subject_categories["subject_categories"]:
+            subject_category_scope = self.manager.get_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(subject_category_scope["subject_category_scope"], dict)
+
+            new_subject_category_scope = dict()
+            new_subject_category_scope_uuid = uuid.uuid4().hex
+            new_subject_category_scope[new_subject_category_scope_uuid] = "new_subject_category_scope"
+            subject_category_scope = self.manager.set_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category,
+                new_subject_category_scope)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIn(new_subject_category_scope[new_subject_category_scope_uuid],
+                          subject_category_scope["subject_category_scope"][subject_category].values())
+
+            # Delete the new subject_category_scope
+            self.manager.del_subject_category_scope(
+                "admin",
+                self.ref["id"],
+                subject_category,
+                new_subject_category_scope_uuid)
+            subject_category_scope = self.manager.get_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertNotIn(new_subject_category_scope_uuid, subject_category_scope["subject_category_scope"])
+
+            # Add a particular subject_category_scope
+            subject_category_scope = self.manager.add_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category,
+                new_subject_category_scope[new_subject_category_scope_uuid])
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("uuid", subject_category_scope["subject_category_scope"])
+            self.assertEqual(new_subject_category_scope[new_subject_category_scope_uuid],
+                             subject_category_scope["subject_category_scope"]["name"])
+            subject_category_scope = self.manager.get_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertNotIn(new_subject_category_scope_uuid, subject_category_scope["subject_category_scope"])
+
+    def test_object_category_scope(self):
+        self.create_intra_extension()
+
+        object_categories = self.manager.set_object_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                uuid.uuid4().hex: "id",
+                uuid.uuid4().hex: "domain",
+            }
+        )
+
+        for object_category in object_categories["object_categories"]:
+            object_category_scope = self.manager.get_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(object_category_scope["object_category_scope"], dict)
+
+            new_object_category_scope = dict()
+            new_object_category_scope_uuid = uuid.uuid4().hex
+            new_object_category_scope[new_object_category_scope_uuid] = "new_object_category_scope"
+            object_category_scope = self.manager.set_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category,
+                new_object_category_scope)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIn(new_object_category_scope[new_object_category_scope_uuid],
+                          object_category_scope["object_category_scope"][object_category].values())
+
+            # Delete the new object_category_scope
+            self.manager.del_object_category_scope(
+                "admin",
+                self.ref["id"],
+                object_category,
+                new_object_category_scope_uuid)
+            object_category_scope = self.manager.get_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertNotIn(new_object_category_scope_uuid, object_category_scope["object_category_scope"])
+
+            # Add a particular object_category_scope
+            object_category_scope = self.manager.add_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category,
+                new_object_category_scope[new_object_category_scope_uuid])
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("uuid", object_category_scope["object_category_scope"])
+            self.assertEqual(new_object_category_scope[new_object_category_scope_uuid],
+                             object_category_scope["object_category_scope"]["name"])
+            object_category_scope = self.manager.get_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertNotIn(new_object_category_scope_uuid, object_category_scope["object_category_scope"])
+
+    def test_action_category_scope(self):
+        self.create_intra_extension()
+
+        action_categories = self.manager.set_action_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                uuid.uuid4().hex: "compute",
+                uuid.uuid4().hex: "identity",
+            }
+        )
+
+        for action_category in action_categories["action_categories"]:
+            action_category_scope = self.manager.get_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(action_category_scope["action_category_scope"], dict)
+
+            new_action_category_scope = dict()
+            new_action_category_scope_uuid = uuid.uuid4().hex
+            new_action_category_scope[new_action_category_scope_uuid] = "new_action_category_scope"
+            action_category_scope = self.manager.set_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category,
+                new_action_category_scope)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIn(new_action_category_scope[new_action_category_scope_uuid],
+                          action_category_scope["action_category_scope"][action_category].values())
+
+            # Delete the new action_category_scope
+            self.manager.del_action_category_scope(
+                "admin",
+                self.ref["id"],
+                action_category,
+                new_action_category_scope_uuid)
+            action_category_scope = self.manager.get_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertNotIn(new_action_category_scope_uuid, action_category_scope["action_category_scope"])
+
+            # Add a particular action_category_scope
+            action_category_scope = self.manager.add_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category,
+                new_action_category_scope[new_action_category_scope_uuid])
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("uuid", action_category_scope["action_category_scope"])
+            self.assertEqual(new_action_category_scope[new_action_category_scope_uuid],
+                             action_category_scope["action_category_scope"]["name"])
+            action_category_scope = self.manager.get_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertNotIn(new_action_category_scope_uuid, action_category_scope["action_category_scope"])
+
+    def test_subject_category_assignment(self):
+        self.create_intra_extension()
+
+        new_subject = self.create_user()
+        new_subjects = dict()
+        new_subjects[new_subject["id"]] = new_subject["name"]
+        subjects = self.manager.set_subject_dict("admin", self.ref["id"], new_subjects)
+
+        new_subject_category_uuid = uuid.uuid4().hex
+        new_subject_category_value = "role"
+        subject_categories = self.manager.set_subject_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                new_subject_category_uuid: new_subject_category_value
+            }
+        )
+
+        for subject_category in subject_categories["subject_categories"]:
+            subject_category_scope = self.manager.get_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(subject_category_scope["subject_category_scope"], dict)
+
+            new_subject_category_scope = dict()
+            new_subject_category_scope_uuid = uuid.uuid4().hex
+            new_subject_category_scope[new_subject_category_scope_uuid] = "admin"
+            subject_category_scope = self.manager.set_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category,
+                new_subject_category_scope)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIn(new_subject_category_scope[new_subject_category_scope_uuid],
+                          subject_category_scope["subject_category_scope"][subject_category].values())
+
+            new_subject_category_scope2 = dict()
+            new_subject_category_scope2_uuid = uuid.uuid4().hex
+            new_subject_category_scope2[new_subject_category_scope2_uuid] = "dev"
+            subject_category_scope = self.manager.set_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category,
+                new_subject_category_scope2)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIn(new_subject_category_scope2[new_subject_category_scope2_uuid],
+                          subject_category_scope["subject_category_scope"][subject_category].values())
+
+            subject_category_assignments = self.manager.get_subject_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_subject["id"]
+            )
+            self.assertIsInstance(subject_category_assignments, dict)
+            self.assertIn("subject_category_assignments", subject_category_assignments)
+            self.assertIn("id", subject_category_assignments)
+            self.assertIn("intra_extension_uuid", subject_category_assignments)
+            self.assertEqual(self.ref["id"], subject_category_assignments["intra_extension_uuid"])
+            self.assertEqual({}, subject_category_assignments["subject_category_assignments"][new_subject["id"]])
+
+            subject_category_assignments = self.manager.set_subject_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_subject["id"],
+                {
+                    new_subject_category_uuid: [new_subject_category_scope_uuid, new_subject_category_scope2_uuid],
+                }
+            )
+            self.assertIsInstance(subject_category_assignments, dict)
+            self.assertIn("subject_category_assignments", subject_category_assignments)
+            self.assertIn("id", subject_category_assignments)
+            self.assertIn("intra_extension_uuid", subject_category_assignments)
+            self.assertEqual(self.ref["id"], subject_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_subject_category_uuid: [new_subject_category_scope_uuid, new_subject_category_scope2_uuid]},
+                subject_category_assignments["subject_category_assignments"][new_subject["id"]])
+            subject_category_assignments = self.manager.get_subject_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_subject["id"]
+            )
+            self.assertIsInstance(subject_category_assignments, dict)
+            self.assertIn("subject_category_assignments", subject_category_assignments)
+            self.assertIn("id", subject_category_assignments)
+            self.assertIn("intra_extension_uuid", subject_category_assignments)
+            self.assertEqual(self.ref["id"], subject_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_subject_category_uuid: [new_subject_category_scope_uuid, new_subject_category_scope2_uuid]},
+                subject_category_assignments["subject_category_assignments"][new_subject["id"]])
+
+            self.manager.del_subject_category_assignment(
+                "admin",
+                self.ref["id"],
+                new_subject["id"],
+                new_subject_category_uuid,
+                new_subject_category_scope_uuid
+            )
+            subject_category_assignments = self.manager.get_subject_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_subject["id"]
+            )
+            self.assertIsInstance(subject_category_assignments, dict)
+            self.assertIn("subject_category_assignments", subject_category_assignments)
+            self.assertIn("id", subject_category_assignments)
+            self.assertIn("intra_extension_uuid", subject_category_assignments)
+            self.assertEqual(self.ref["id"], subject_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_subject_category_uuid: [new_subject_category_scope2_uuid, ]},
+                subject_category_assignments["subject_category_assignments"][new_subject["id"]])
+
+            data = self.manager.add_subject_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_subject["id"],
+                new_subject_category_uuid,
+                new_subject_category_scope_uuid
+            )
+
+            subject_category_assignments = self.manager.get_subject_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_subject["id"]
+            )
+            self.assertIsInstance(subject_category_assignments, dict)
+            self.assertIn("subject_category_assignments", subject_category_assignments)
+            self.assertIn("id", subject_category_assignments)
+            self.assertIn("intra_extension_uuid", subject_category_assignments)
+            self.assertEqual(self.ref["id"], subject_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_subject_category_uuid: [new_subject_category_scope2_uuid, new_subject_category_scope_uuid]},
+                subject_category_assignments["subject_category_assignments"][new_subject["id"]])
+
+    def test_object_category_assignment(self):
+        self.create_intra_extension()
+
+        new_object = self.create_user()
+        new_objects = dict()
+        new_objects[new_object["id"]] = new_object["name"]
+        objects = self.manager.set_object_dict("admin", self.ref["id"], new_objects)
+
+        new_object_category_uuid = uuid.uuid4().hex
+        new_object_category_value = "role"
+        object_categories = self.manager.set_object_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                new_object_category_uuid: new_object_category_value
+            }
+        )
+
+        for object_category in object_categories["object_categories"]:
+            object_category_scope = self.manager.get_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(object_category_scope["object_category_scope"], dict)
+
+            new_object_category_scope = dict()
+            new_object_category_scope_uuid = uuid.uuid4().hex
+            new_object_category_scope[new_object_category_scope_uuid] = "admin"
+            object_category_scope = self.manager.set_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category,
+                new_object_category_scope)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIn(new_object_category_scope[new_object_category_scope_uuid],
+                          object_category_scope["object_category_scope"][object_category].values())
+
+            new_object_category_scope2 = dict()
+            new_object_category_scope2_uuid = uuid.uuid4().hex
+            new_object_category_scope2[new_object_category_scope2_uuid] = "dev"
+            object_category_scope = self.manager.set_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category,
+                new_object_category_scope2)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIn(new_object_category_scope2[new_object_category_scope2_uuid],
+                          object_category_scope["object_category_scope"][object_category].values())
+
+            object_category_assignments = self.manager.get_object_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_object["id"]
+            )
+            self.assertIsInstance(object_category_assignments, dict)
+            self.assertIn("object_category_assignments", object_category_assignments)
+            self.assertIn("id", object_category_assignments)
+            self.assertIn("intra_extension_uuid", object_category_assignments)
+            self.assertEqual(self.ref["id"], object_category_assignments["intra_extension_uuid"])
+            self.assertEqual({}, object_category_assignments["object_category_assignments"][new_object["id"]])
+
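+            # Assign both scope values to the object, then read the
+            # assignments back to check they were persisted.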
+            object_category_assignments = self.manager.set_object_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_object["id"],
+                {
+                    new_object_category_uuid: [new_object_category_scope_uuid, new_object_category_scope2_uuid],
+                }
+            )
+            self.assertIsInstance(object_category_assignments, dict)
+            self.assertIn("object_category_assignments", object_category_assignments)
+            self.assertIn("id", object_category_assignments)
+            self.assertIn("intra_extension_uuid", object_category_assignments)
+            self.assertEqual(self.ref["id"], object_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_object_category_uuid: [new_object_category_scope_uuid, new_object_category_scope2_uuid]},
+                object_category_assignments["object_category_assignments"][new_object["id"]])
+            object_category_assignments = self.manager.get_object_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_object["id"]
+            )
+            self.assertIsInstance(object_category_assignments, dict)
+            self.assertIn("object_category_assignments", object_category_assignments)
+            self.assertIn("id", object_category_assignments)
+            self.assertIn("intra_extension_uuid", object_category_assignments)
+            self.assertEqual(self.ref["id"], object_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_object_category_uuid: [new_object_category_scope_uuid, new_object_category_scope2_uuid]},
+                object_category_assignments["object_category_assignments"][new_object["id"]])
+
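+            # Removing one assignment must leave only the second scope value.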
+            self.manager.del_object_category_assignment(
+                "admin",
+                self.ref["id"],
+                new_object["id"],
+                new_object_category_uuid,
+                new_object_category_scope_uuid
+            )
+            object_category_assignments = self.manager.get_object_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_object["id"]
+            )
+            self.assertIsInstance(object_category_assignments, dict)
+            self.assertIn("object_category_assignments", object_category_assignments)
+            self.assertIn("id", object_category_assignments)
+            self.assertIn("intra_extension_uuid", object_category_assignments)
+            self.assertEqual(self.ref["id"], object_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_object_category_uuid: [new_object_category_scope2_uuid, ]},
+                object_category_assignments["object_category_assignments"][new_object["id"]])
+
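+            # Re-adding the removed assignment appends it after the remaining one.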
+            self.manager.add_object_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_object["id"],
+                new_object_category_uuid,
+                new_object_category_scope_uuid
+            )
+
+            object_category_assignments = self.manager.get_object_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_object["id"]
+            )
+            self.assertIsInstance(object_category_assignments, dict)
+            self.assertIn("object_category_assignments", object_category_assignments)
+            self.assertIn("id", object_category_assignments)
+            self.assertIn("intra_extension_uuid", object_category_assignments)
+            self.assertEqual(self.ref["id"], object_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_object_category_uuid: [new_object_category_scope2_uuid, new_object_category_scope_uuid]},
+                object_category_assignments["object_category_assignments"][new_object["id"]])
+
+    def test_action_category_assignment(self):
+        self.create_intra_extension()
+
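+        # Same scenario as test_object_category_assignment, exercised against
+        # the action-side APIs.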
+        new_action = self.create_user()
+        new_actions = dict()
+        new_actions[new_action["id"]] = new_action["name"]
+        actions = self.manager.set_action_dict("admin", self.ref["id"], new_actions)
+
+        new_action_category_uuid = uuid.uuid4().hex
+        new_action_category_value = "role"
+        action_categories = self.manager.set_action_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                new_action_category_uuid: new_action_category_value
+            }
+        )
+
+        for action_category in action_categories["action_categories"]:
+            action_category_scope = self.manager.get_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(action_category_scope["action_category_scope"], dict)
+
+            new_action_category_scope = dict()
+            new_action_category_scope_uuid = uuid.uuid4().hex
+            new_action_category_scope[new_action_category_scope_uuid] = "admin"
+            action_category_scope = self.manager.set_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category,
+                new_action_category_scope)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIn(new_action_category_scope[new_action_category_scope_uuid],
+                          action_category_scope["action_category_scope"][action_category].values())
+
+            new_action_category_scope2 = dict()
+            new_action_category_scope2_uuid = uuid.uuid4().hex
+            new_action_category_scope2[new_action_category_scope2_uuid] = "dev"
+            action_category_scope = self.manager.set_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category,
+                new_action_category_scope2)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIn(new_action_category_scope2[new_action_category_scope2_uuid],
+                          action_category_scope["action_category_scope"][action_category].values())
+
+            action_category_assignments = self.manager.get_action_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_action["id"]
+            )
+            self.assertIsInstance(action_category_assignments, dict)
+            self.assertIn("action_category_assignments", action_category_assignments)
+            self.assertIn("id", action_category_assignments)
+            self.assertIn("intra_extension_uuid", action_category_assignments)
+            self.assertEqual(self.ref["id"], action_category_assignments["intra_extension_uuid"])
+            self.assertEqual({}, action_category_assignments["action_category_assignments"][new_action["id"]])
+
+            action_category_assignments = self.manager.set_action_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_action["id"],
+                {
+                    new_action_category_uuid: [new_action_category_scope_uuid, new_action_category_scope2_uuid],
+                }
+            )
+            self.assertIsInstance(action_category_assignments, dict)
+            self.assertIn("action_category_assignments", action_category_assignments)
+            self.assertIn("id", action_category_assignments)
+            self.assertIn("intra_extension_uuid", action_category_assignments)
+            self.assertEqual(self.ref["id"], action_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_action_category_uuid: [new_action_category_scope_uuid, new_action_category_scope2_uuid]},
+                action_category_assignments["action_category_assignments"][new_action["id"]])
+            action_category_assignments = self.manager.get_action_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_action["id"]
+            )
+            self.assertIsInstance(action_category_assignments, dict)
+            self.assertIn("action_category_assignments", action_category_assignments)
+            self.assertIn("id", action_category_assignments)
+            self.assertIn("intra_extension_uuid", action_category_assignments)
+            self.assertEqual(self.ref["id"], action_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_action_category_uuid: [new_action_category_scope_uuid, new_action_category_scope2_uuid]},
+                action_category_assignments["action_category_assignments"][new_action["id"]])
+
+            self.manager.del_action_category_assignment(
+                "admin",
+                self.ref["id"],
+                new_action["id"],
+                new_action_category_uuid,
+                new_action_category_scope_uuid
+            )
+            action_category_assignments = self.manager.get_action_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_action["id"]
+            )
+            self.assertIsInstance(action_category_assignments, dict)
+            self.assertIn("action_category_assignments", action_category_assignments)
+            self.assertIn("id", action_category_assignments)
+            self.assertIn("intra_extension_uuid", action_category_assignments)
+            self.assertEqual(self.ref["id"], action_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_action_category_uuid: [new_action_category_scope2_uuid, ]},
+                action_category_assignments["action_category_assignments"][new_action["id"]])
+
+            self.manager.add_action_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_action["id"],
+                new_action_category_uuid,
+                new_action_category_scope_uuid
+            )
+
+            action_category_assignments = self.manager.get_action_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_action["id"]
+            )
+            self.assertIsInstance(action_category_assignments, dict)
+            self.assertIn("action_category_assignments", action_category_assignments)
+            self.assertIn("id", action_category_assignments)
+            self.assertIn("intra_extension_uuid", action_category_assignments)
+            self.assertEqual(self.ref["id"], action_category_assignments["intra_extension_uuid"])
+            self.assertEqual(
+                {new_action_category_uuid: [new_action_category_scope2_uuid, new_action_category_scope_uuid]},
+                action_category_assignments["action_category_assignments"][new_action["id"]])
+
+    def test_sub_meta_rules(self):
+        self.create_intra_extension()
+
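+        # The intra extension must expose its aggregation algorithms, and
+        # switching to another available algorithm must be accepted here.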
+        aggregation_algorithms = self.manager.get_aggregation_algorithms("admin", self.ref["id"])
+        self.assertIsInstance(aggregation_algorithms, dict)
+        self.assertIsInstance(aggregation_algorithms["aggregation_algorithms"], list)
+        self.assertIn("and_true_aggregation", aggregation_algorithms["aggregation_algorithms"])
+        self.assertIn("test_aggregation", aggregation_algorithms["aggregation_algorithms"])
+
+        aggregation_algorithm = self.manager.get_aggregation_algorithm("admin", self.ref["id"])
+        self.assertIsInstance(aggregation_algorithm, dict)
+        self.assertIn("aggregation", aggregation_algorithm)
+        self.assertIn(aggregation_algorithm["aggregation"], aggregation_algorithms["aggregation_algorithms"])
+
+        _aggregation_algorithm = list(aggregation_algorithms["aggregation_algorithms"])
+        _aggregation_algorithm.remove(aggregation_algorithm["aggregation"])
+        aggregation_algorithm = self.manager.set_aggregation_algorithm("admin", self.ref["id"], _aggregation_algorithm[0])
+        self.assertIsInstance(aggregation_algorithm, dict)
+        self.assertIn("aggregation", aggregation_algorithm)
+        self.assertIn(aggregation_algorithm["aggregation"], aggregation_algorithms["aggregation_algorithms"])
+
+        sub_meta_rules = self.manager.get_sub_meta_rule("admin", self.ref["id"])
+        self.assertIsInstance(sub_meta_rules, dict)
+        self.assertIn("sub_meta_rules", sub_meta_rules)
+        with open(os.path.join(self.policy_directory, self.ref["model"], "metarule.json")) as conf_file:
+            sub_meta_rules_conf = json.load(conf_file)
+        metarule = dict()
+        categories = {
+            "subject_categories": self.manager.get_subject_category_dict("admin", self.ref["id"]),
+            "object_categories": self.manager.get_object_category_dict("admin", self.ref["id"]),
+            "action_categories": self.manager.get_action_category_dict("admin", self.ref["id"])
+        }
+        for relation in sub_meta_rules_conf["sub_meta_rules"]:
+            metarule[relation] = dict()
+            for item in ("subject_categories", "object_categories", "action_categories"):
+                metarule[relation][item] = list()
+                for element in sub_meta_rules_conf["sub_meta_rules"][relation][item]:
+                    metarule[relation][item].append(self.__get_key_from_value(
+                        element,
+                        categories[item][item]
+                    ))
+
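+        # The stored sub meta rules must match the expectation rebuilt from the
+        # policy's metarule.json; appending a new subject category to a
+        # relation must also be accepted by set_sub_meta_rule.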
+        for relation in sub_meta_rules["sub_meta_rules"]:
+            self.assertIn(relation, metarule)
+            for item in ("subject_categories", "object_categories", "action_categories"):
+                self.assertEqual(
+                    sub_meta_rules["sub_meta_rules"][relation][item],
+                    metarule[relation][item]
+                )
+
+            new_subject_category = {"id": uuid.uuid4().hex, "name": "subject_category_test"}
+            # Add a particular subject_category
+            data = self.manager.add_subject_category_dict(
+                "admin",
+                self.ref["id"],
+                new_subject_category["name"])
+            new_subject_category["id"] = data["subject_category"]["uuid"]
+            subject_categories = self.manager.get_subject_category_dict(
+                "admin",
+                self.ref["id"])
+            self.assertIsInstance(subject_categories, dict)
+            self.assertIn("subject_categories", subject_categories)
+            self.assertIn("id", subject_categories)
+            self.assertIn("intra_extension_uuid", subject_categories)
+            self.assertEqual(self.ref["id"], subject_categories["intra_extension_uuid"])
+            self.assertIn(new_subject_category["id"], subject_categories["subject_categories"])
+            metarule[relation]["subject_categories"].append(new_subject_category["id"])
+            _sub_meta_rules = self.manager.set_sub_meta_rule("admin", self.ref["id"], metarule)
+            self.assertIn(relation, metarule)
+            for item in ("subject_categories", "object_categories", "action_categories"):
+                self.assertEqual(
+                    _sub_meta_rules["sub_meta_rules"][relation][item],
+                    metarule[relation][item]
+                )
+
+    def test_sub_rules(self):
+        self.create_intra_extension()
+
+        sub_meta_rules = self.manager.get_sub_meta_rule("admin", self.ref["id"])
+        self.assertIsInstance(sub_meta_rules, dict)
+        self.assertIn("sub_meta_rules", sub_meta_rules)
+
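+        # Every non-boolean entry of a stored rule must be a known scope value
+        # for the corresponding category of its relation.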
+        sub_rules = self.manager.get_sub_rules("admin", self.ref["id"])
+        self.assertIsInstance(sub_rules, dict)
+        self.assertIn("rules", sub_rules)
+        rules = dict()
+        for relation in sub_rules["rules"]:
+            self.assertIn(relation, self.manager.get_sub_meta_rule_relations("admin", self.ref["id"])["sub_meta_rule_relations"])
+            rules[relation] = list()
+            for rule in sub_rules["rules"][relation]:
+                for cat, cat_func, func_name in (
+                    ("subject_categories", self.manager.get_subject_category_scope_dict, "subject_category_scope"),
+                    ("action_categories", self.manager.get_action_category_scope_dict, "action_category_scope"),
+                    ("object_categories", self.manager.get_object_category_scope_dict, "object_category_scope"),
+                ):
+                    for cat_value in sub_meta_rules["sub_meta_rules"][relation][cat]:
+                        scope = cat_func(
+                            "admin",
+                            self.ref["id"],
+                            cat_value
+                        )
+                        a_scope = rule.pop(0)
+                        if not isinstance(a_scope, bool):
+                            self.assertIn(a_scope, scope[func_name][cat_value])
+
+        # Build a new sub-rule from one known scope value per category, then store it.
+
+        relation = sub_rules["rules"].keys()[0]
+        sub_rule = []
+        for cat, cat_func, func_name in (
+            ("subject_categories", self.manager.get_subject_category_scope_dict, "subject_category_scope"),
+            ("action_categories", self.manager.get_action_category_scope_dict, "action_category_scope"),
+            ("object_categories", self.manager.get_object_category_scope_dict, "object_category_scope"),
+        ):
+            for cat_value in sub_meta_rules["sub_meta_rules"][relation][cat]:
+                scope = cat_func(
+                    "admin",
+                    self.ref["id"],
+                    cat_value
+                )
+                sub_rule.append(list(scope[func_name][cat_value])[0])
+
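+        # Close the rule with the boolean flag that stored rules carry (the
+        # non-scope entry skipped during the checks above).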
+        sub_rule.append(True)
+        sub_rules = self.manager.set_sub_rule("admin", self.ref["id"], relation, sub_rule)
+        self.assertIsInstance(sub_rules, dict)
+        self.assertIn("rules", sub_rules)
+        rules = dict()
+        self.assertIn(sub_rule, sub_rules["rules"][relation])
+        for relation in sub_rules["rules"]:
+            self.assertIn(relation, self.manager.get_sub_meta_rule_relations("admin", self.ref["id"])["sub_meta_rule_relations"])
+            rules[relation] = list()
+            for rule in sub_rules["rules"][relation]:
+                for cat, cat_func, func_name in (
+                    ("subject_categories", self.manager.get_subject_category_scope_dict, "subject_category_scope"),
+                    ("action_categories", self.manager.get_action_category_scope_dict, "action_category_scope"),
+                    ("object_categories", self.manager.get_object_category_scope_dict, "object_category_scope"),
+                ):
+                    for cat_value in sub_meta_rules["sub_meta_rules"][relation][cat]:
+                        scope = cat_func(
+                            "admin",
+                            self.ref["id"],
+                            cat_value
+                        )
+                        a_scope = rule.pop(0)
+                        self.assertIn(a_scope, scope[func_name][cat_value])
+
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py
new file mode 100644 (file)
index 0000000..d08ecf3
--- /dev/null
@@ -0,0 +1,861 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+"""Unit tests for core IntraExtensionAuthzManager"""
+
+import json
+import os
+import uuid
+from oslo_config import cfg
+from keystone.tests import unit as tests
+from keystone.contrib.moon.core import IntraExtensionAdminManager, IntraExtensionAuthzManager
+from keystone.tests.unit.ksfixtures import database
+from keystone import resource
+from keystone.contrib.moon.exception import *
+from keystone.tests.unit import default_fixtures
+from keystone.contrib.moon.core import LogManager, TenantManager
+
+CONF = cfg.CONF
+
+USER_ADMIN = {
+    'name': 'admin',
+    'domain_id': "default",
+    'password': 'admin'
+}
+
+IE = {
+    "name": "test IE",
+    "policymodel": "policy_rbac_authz",
+    "description": "a simple description."
+}
+
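+
+# The authz-facing manager exposes a read-only view of an intra extension:
+# every mutating call in the tests below is expected to be rejected with
+# AuthIntraExtensionModificationNotAuthorized.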
+class TestIntraExtensionAuthzManagerAuthz(tests.TestCase):
+
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(TestIntraExtensionAuthzManagerAuthz, self).setUp()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        self.manager = IntraExtensionAuthzManager()
+        self.admin_manager = IntraExtensionAdminManager()
+
+    def __get_key_from_value(self, value, values_dict):
+        return next(k for k, v in values_dict.items() if v == value)
+
+    def load_extra_backends(self):
+        return {
+            "moonlog_api": LogManager(),
+            "tenant_api": TenantManager(),
+            # "resource_api": resource.Manager(),
+        }
+
+    def config_overrides(self):
+        super(TestIntraExtensionAuthzManagerAuthz, self).config_overrides()
+        self.policy_directory = '../../../examples/moon/policies'
+        self.config_fixture.config(
+            group='moon',
+            intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
+        self.config_fixture.config(
+            group='moon',
+            policy_directory=self.policy_directory)
+
+
+class TestIntraExtensionAuthzManager(tests.TestCase):
+
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(TestIntraExtensionAuthzManager, self).setUp()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        self.manager = IntraExtensionAuthzManager()
+        self.admin_manager = IntraExtensionAdminManager()
+
+    def __get_key_from_value(self, value, values_dict):
+        return next(k for k, v in values_dict.items() if v == value)
+
+    def load_extra_backends(self):
+        return {
+            "moonlog_api": LogManager(),
+            "tenant_api": TenantManager(),
+            # "resource_api": resource.Manager(),
+        }
+
+    def config_overrides(self):
+        super(TestIntraExtensionAuthzManager, self).config_overrides()
+        self.policy_directory = '../../../examples/moon/policies'
+        self.config_fixture.config(
+            group='moon',
+            intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
+        self.config_fixture.config(
+            group='moon',
+            policy_directory=self.policy_directory)
+
+    def create_intra_extension(self, policy_model="policy_rbac_authz"):
+        # Create the admin user because IntraExtension needs it
+        self.admin = self.identity_api.create_user(USER_ADMIN)
+        IE["policymodel"] = policy_model
+        self.ref = self.admin_manager.load_intra_extension(IE)
+        self.assertIsInstance(self.ref, dict)
+        self.create_tenant(self.ref["id"])
+
+    def create_tenant(self, authz_uuid):
+        tenant = {
+            "id": uuid.uuid4().hex,
+            "name": "TestIntraExtensionAuthzManager",
+            "enabled": True,
+            "description": "",
+            "domain_id": "default"
+        }
+        project = self.resource_api.create_project(tenant["id"], tenant)
+        mapping = self.tenant_api.set_tenant_dict(project["id"], project["name"], authz_uuid, None)
+        self.assertIsInstance(mapping, dict)
+        self.assertIn("authz", mapping)
+        self.assertEqual(mapping["authz"], authz_uuid)
+        return mapping
+
+    def create_user(self, username="TestIntraExtensionAuthzManagerUser"):
+        user = {
+            "id": uuid.uuid4().hex,
+            "name": username,
+            "enabled": True,
+            "description": "",
+            "domain_id": "default"
+        }
+        _user = self.identity_api.create_user(user)
+        return _user
+
+    def delete_admin_intra_extension(self):
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.delete_intra_extension,
+            self.ref["id"])
+
+    def test_subjects(self):
+        self.create_intra_extension()
+
+        subjects = self.manager.get_subject_dict("admin", self.ref["id"])
+        self.assertIsInstance(subjects, dict)
+        self.assertIn("subjects", subjects)
+        self.assertIn("id", subjects)
+        self.assertIn("intra_extension_uuid", subjects)
+        self.assertEqual(self.ref["id"], subjects["intra_extension_uuid"])
+        self.assertIsInstance(subjects["subjects"], dict)
+
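+        # Writes through the authz manager must be refused.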
+        new_subject = self.create_user()
+        new_subjects = dict()
+        new_subjects[new_subject["id"]] = new_subject["name"]
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.set_subject_dict,
+            "admin", self.ref["id"], new_subjects)
+
+        # Delete the new subject
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.del_subject,
+            "admin", self.ref["id"], new_subject["id"])
+
+        # Add a particular subject
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.add_subject_dict,
+            "admin", self.ref["id"], new_subject["id"])
+
+    def test_objects(self):
+        self.create_intra_extension()
+
+        objects = self.manager.get_object_dict("admin", self.ref["id"])
+        self.assertIsInstance(objects, dict)
+        self.assertIn("objects", objects)
+        self.assertIn("id", objects)
+        self.assertIn("intra_extension_uuid", objects)
+        self.assertEqual(self.ref["id"], objects["intra_extension_uuid"])
+        self.assertIsInstance(objects["objects"], dict)
+
+        new_object = self.create_user()
+        new_objects = dict()
+        new_objects[new_object["id"]] = new_object["name"]
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.set_object_dict,
+            "admin", self.ref["id"], new_object["id"])
+
+        # Delete the new object
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.del_object,
+            "admin", self.ref["id"], new_object["id"])
+
+        # Add a particular object
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.add_object_dict,
+            "admin", self.ref["id"], new_object["name"])
+
+    def test_actions(self):
+        self.create_intra_extension()
+
+        actions = self.manager.get_action_dict("admin", self.ref["id"])
+        self.assertIsInstance(actions, dict)
+        self.assertIn("actions", actions)
+        self.assertIn("id", actions)
+        self.assertIn("intra_extension_uuid", actions)
+        self.assertEqual(self.ref["id"], actions["intra_extension_uuid"])
+        self.assertIsInstance(actions["actions"], dict)
+
+        new_action = self.create_user()
+        new_actions = dict()
+        new_actions[new_action["id"]] = new_action["name"]
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.set_action_dict,
+            "admin", self.ref["id"], new_actions)
+
+        # Delete the new action
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.del_action,
+            "admin", self.ref["id"], new_action["id"])
+
+        # Add a particular action
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.add_action_dict,
+            "admin", self.ref["id"], new_action["id"])
+
+    def test_subject_categories(self):
+        self.create_intra_extension()
+
+        subject_categories = self.manager.get_subject_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(subject_categories, dict)
+        self.assertIn("subject_categories", subject_categories)
+        self.assertIn("id", subject_categories)
+        self.assertIn("intra_extension_uuid", subject_categories)
+        self.assertEqual(self.ref["id"], subject_categories["intra_extension_uuid"])
+        self.assertIsInstance(subject_categories["subject_categories"], dict)
+
+        new_subject_category = {"id": uuid.uuid4().hex, "name": "subject_category_test"}
+        new_subject_categories = dict()
+        new_subject_categories[new_subject_category["id"]] = new_subject_category["name"]
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.set_subject_category_dict,
+            "admin", self.ref["id"], new_subject_categories)
+
+        # Delete the new subject_category
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.del_subject_category,
+            "admin", self.ref["id"], new_subject_category["id"])
+
+        # Add a particular subject_category
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.add_subject_category_dict,
+            "admin", self.ref["id"], new_subject_category["name"])
+
+    def test_object_categories(self):
+        self.create_intra_extension()
+
+        object_categories = self.manager.get_object_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(object_categories, dict)
+        self.assertIn("object_categories", object_categories)
+        self.assertIn("id", object_categories)
+        self.assertIn("intra_extension_uuid", object_categories)
+        self.assertEqual(self.ref["id"], object_categories["intra_extension_uuid"])
+        self.assertIsInstance(object_categories["object_categories"], dict)
+
+        new_object_category = {"id": uuid.uuid4().hex, "name": "object_category_test"}
+        new_object_categories = dict()
+        new_object_categories[new_object_category["id"]] = new_object_category["name"]
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.set_object_category_dict,
+            "admin", self.ref["id"], new_object_categories)
+
+        # Delete the new object_category
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.del_object_category,
+            "admin", self.ref["id"], new_object_category["id"])
+
+        # Add a particular object_category
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.add_object_category_dict,
+            "admin", self.ref["id"], new_object_category["name"])
+
+    def test_action_categories(self):
+        self.create_intra_extension()
+
+        action_categories = self.manager.get_action_category_dict("admin", self.ref["id"])
+        self.assertIsInstance(action_categories, dict)
+        self.assertIn("action_categories", action_categories)
+        self.assertIn("id", action_categories)
+        self.assertIn("intra_extension_uuid", action_categories)
+        self.assertEqual(self.ref["id"], action_categories["intra_extension_uuid"])
+        self.assertIsInstance(action_categories["action_categories"], dict)
+
+        new_action_category = {"id": uuid.uuid4().hex, "name": "action_category_test"}
+        new_action_categories = dict()
+        new_action_categories[new_action_category["id"]] = new_action_category["name"]
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.set_action_category_dict,
+            "admin", self.ref["id"], new_action_categories)
+
+        # Delete the new action_category
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.del_action_category,
+            "admin", self.ref["id"], new_action_category["id"])
+
+        # Add a particular action_category
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.add_action_category_dict,
+            "admin", self.ref["id"], new_action_category["name"])
+
+    def test_subject_category_scope(self):
+        self.create_intra_extension()
+
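+        # Categories are provisioned through the admin manager; scope writes
+        # through the authz manager must fail.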
+        subject_categories = self.admin_manager.set_subject_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                uuid.uuid4().hex: "admin",
+                uuid.uuid4().hex: "dev",
+            }
+        )
+
+        for subject_category in subject_categories["subject_categories"]:
+            subject_category_scope = self.manager.get_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(subject_category_scope["subject_category_scope"], dict)
+
+            new_subject_category_scope = dict()
+            new_subject_category_scope_uuid = uuid.uuid4().hex
+            new_subject_category_scope[new_subject_category_scope_uuid] = "new_subject_category_scope"
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.set_subject_category_scope_dict,
+                "admin", self.ref["id"], subject_category, new_subject_category_scope)
+
+            # Delete the new subject_category_scope
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.del_subject_category_scope,
+                "admin", self.ref["id"], subject_category, new_subject_category_scope_uuid)
+
+            # Add a particular subject_category_scope
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.add_subject_category_scope_dict,
+                "admin", self.ref["id"], subject_category, new_subject_category_scope[new_subject_category_scope_uuid])
+
+    def test_object_category_scope(self):
+        self.create_intra_extension()
+
+        object_categories = self.admin_manager.set_object_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                uuid.uuid4().hex: "id",
+                uuid.uuid4().hex: "domain",
+            }
+        )
+
+        for object_category in object_categories["object_categories"]:
+            object_category_scope = self.manager.get_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(object_category_scope["object_category_scope"], dict)
+
+            new_object_category_scope = dict()
+            new_object_category_scope_uuid = uuid.uuid4().hex
+            new_object_category_scope[new_object_category_scope_uuid] = "new_object_category_scope"
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.set_object_category_scope_dict,
+                "admin", self.ref["id"], object_category, new_object_category_scope)
+
+            # Delete the new object_category_scope
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.del_object_category_scope,
+                "admin", self.ref["id"], object_category, new_object_category_scope_uuid)
+
+            # Add a particular object_category_scope
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.add_object_category_scope_dict,
+                "admin", self.ref["id"], object_category, new_object_category_scope[new_object_category_scope_uuid])
+
+    def test_action_category_scope(self):
+        self.create_intra_extension()
+
+        action_categories = self.admin_manager.set_action_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                uuid.uuid4().hex: "compute",
+                uuid.uuid4().hex: "identity",
+            }
+        )
+
+        for action_category in action_categories["action_categories"]:
+            action_category_scope = self.manager.get_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(action_category_scope["action_category_scope"], dict)
+
+            new_action_category_scope = dict()
+            new_action_category_scope_uuid = uuid.uuid4().hex
+            new_action_category_scope[new_action_category_scope_uuid] = "new_action_category_scope"
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.set_action_category_scope_dict,
+                "admin", self.ref["id"], action_category, new_action_category_scope)
+
+            # Delete the new action_category_scope
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.del_action_category_scope,
+                "admin", self.ref["id"], action_category, new_action_category_scope_uuid)
+
+            # Add a particular action_category_scope
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.add_action_category_scope_dict,
+                "admin", self.ref["id"], action_category, new_action_category_scope[new_action_category_scope_uuid])
+
+    def test_subject_category_assignment(self):
+        self.create_intra_extension()
+
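+        # Provision subjects, categories and scopes through the admin manager;
+        # the authz manager may read them but must refuse assignments.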
+        new_subject = self.create_user()
+        new_subjects = dict()
+        new_subjects[new_subject["id"]] = new_subject["name"]
+        subjects = self.admin_manager.set_subject_dict("admin", self.ref["id"], new_subjects)
+
+        new_subject_category_uuid = uuid.uuid4().hex
+        new_subject_category_value = "role"
+        subject_categories = self.admin_manager.set_subject_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                new_subject_category_uuid: new_subject_category_value
+            }
+        )
+
+        for subject_category in subject_categories["subject_categories"]:
+            subject_category_scope = self.admin_manager.get_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(subject_category_scope["subject_category_scope"], dict)
+
+            new_subject_category_scope = dict()
+            new_subject_category_scope_uuid = uuid.uuid4().hex
+            new_subject_category_scope[new_subject_category_scope_uuid] = "admin"
+            subject_category_scope = self.admin_manager.set_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category,
+                new_subject_category_scope)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIn(new_subject_category_scope[new_subject_category_scope_uuid],
+                          subject_category_scope["subject_category_scope"][subject_category].values())
+
+            new_subject_category_scope2 = dict()
+            new_subject_category_scope2_uuid = uuid.uuid4().hex
+            new_subject_category_scope2[new_subject_category_scope2_uuid] = "dev"
+            subject_category_scope = self.admin_manager.set_subject_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                subject_category,
+                new_subject_category_scope2)
+            self.assertIsInstance(subject_category_scope, dict)
+            self.assertIn("subject_category_scope", subject_category_scope)
+            self.assertIn("id", subject_category_scope)
+            self.assertIn("intra_extension_uuid", subject_category_scope)
+            self.assertEqual(self.ref["id"], subject_category_scope["intra_extension_uuid"])
+            self.assertIn(new_subject_category_scope2[new_subject_category_scope2_uuid],
+                          subject_category_scope["subject_category_scope"][subject_category].values())
+
+            subject_category_assignments = self.manager.get_subject_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_subject["id"]
+            )
+            self.assertIsInstance(subject_category_assignments, dict)
+            self.assertIn("subject_category_assignments", subject_category_assignments)
+            self.assertIn("id", subject_category_assignments)
+            self.assertIn("intra_extension_uuid", subject_category_assignments)
+            self.assertEqual(self.ref["id"], subject_category_assignments["intra_extension_uuid"])
+            self.assertEqual({}, subject_category_assignments["subject_category_assignments"][new_subject["id"]])
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.set_subject_category_assignment_dict,
+                "admin", self.ref["id"], new_subject["id"],
+                {
+                    new_subject_category_uuid: [new_subject_category_scope_uuid, new_subject_category_scope2_uuid],
+                })
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.del_subject_category_assignment,
+                "admin", self.ref["id"], new_subject["id"],
+                new_subject_category_uuid,
+                new_subject_category_scope_uuid)
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.add_subject_category_assignment_dict,
+                "admin", self.ref["id"], new_subject["id"],
+                new_subject_category_uuid,
+                new_subject_category_scope_uuid)
+
+    def test_object_category_assignment(self):
+        self.create_intra_extension()
+
+        new_object = self.create_user()
+        new_objects = dict()
+        new_objects[new_object["id"]] = new_object["name"]
+        objects = self.admin_manager.set_object_dict("admin", self.ref["id"], new_objects)
+
+        new_object_category_uuid = uuid.uuid4().hex
+        new_object_category_value = "role"
+        object_categories = self.admin_manager.set_object_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                new_object_category_uuid: new_object_category_value
+            }
+        )
+
+        for object_category in object_categories["object_categories"]:
+            object_category_scope = self.admin_manager.get_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(object_category_scope["object_category_scope"], dict)
+
+            new_object_category_scope = dict()
+            new_object_category_scope_uuid = uuid.uuid4().hex
+            new_object_category_scope[new_object_category_scope_uuid] = "admin"
+            object_category_scope = self.admin_manager.set_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category,
+                new_object_category_scope)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIn(new_object_category_scope[new_object_category_scope_uuid],
+                          object_category_scope["object_category_scope"][object_category].values())
+
+            new_object_category_scope2 = dict()
+            new_object_category_scope2_uuid = uuid.uuid4().hex
+            new_object_category_scope2[new_object_category_scope2_uuid] = "dev"
+            object_category_scope = self.admin_manager.set_object_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                object_category,
+                new_object_category_scope2)
+            self.assertIsInstance(object_category_scope, dict)
+            self.assertIn("object_category_scope", object_category_scope)
+            self.assertIn("id", object_category_scope)
+            self.assertIn("intra_extension_uuid", object_category_scope)
+            self.assertEqual(self.ref["id"], object_category_scope["intra_extension_uuid"])
+            self.assertIn(new_object_category_scope2[new_object_category_scope2_uuid],
+                          object_category_scope["object_category_scope"][object_category].values())
+
+            object_category_assignments = self.manager.get_object_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_object["id"]
+            )
+            self.assertIsInstance(object_category_assignments, dict)
+            self.assertIn("object_category_assignments", object_category_assignments)
+            self.assertIn("id", object_category_assignments)
+            self.assertIn("intra_extension_uuid", object_category_assignments)
+            self.assertEqual(self.ref["id"], object_category_assignments["intra_extension_uuid"])
+            self.assertEqual({}, object_category_assignments["object_category_assignments"][new_object["id"]])
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.set_object_category_assignment_dict,
+                "admin", self.ref["id"], new_object["id"],
+                {
+                    new_object_category_uuid: [new_object_category_scope_uuid, new_object_category_scope2_uuid],
+                })
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.del_object_category_assignment,
+                "admin", self.ref["id"], new_object["id"],
+                new_object_category_uuid,
+                new_object_category_scope_uuid)
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.add_object_category_assignment_dict,
+                "admin", self.ref["id"], new_object["id"],
+                new_object_category_uuid,
+                new_object_category_scope_uuid)
+
+    def test_action_category_assignment(self):
+        self.create_intra_extension()
+
+        new_action = self.create_user()
+        new_actions = dict()
+        new_actions[new_action["id"]] = new_action["name"]
+        actions = self.admin_manager.set_action_dict("admin", self.ref["id"], new_actions)
+
+        new_action_category_uuid = uuid.uuid4().hex
+        new_action_category_value = "role"
+        action_categories = self.admin_manager.set_action_category_dict(
+            "admin",
+            self.ref["id"],
+            {
+                new_action_category_uuid: new_action_category_value
+            }
+        )
+
+        for action_category in action_categories["action_categories"]:
+            action_category_scope = self.admin_manager.get_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIsInstance(action_category_scope["action_category_scope"], dict)
+
+            new_action_category_scope = dict()
+            new_action_category_scope_uuid = uuid.uuid4().hex
+            new_action_category_scope[new_action_category_scope_uuid] = "admin"
+            action_category_scope = self.admin_manager.set_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category,
+                new_action_category_scope)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIn(new_action_category_scope[new_action_category_scope_uuid],
+                          action_category_scope["action_category_scope"][action_category].values())
+
+            new_action_category_scope2 = dict()
+            new_action_category_scope2_uuid = uuid.uuid4().hex
+            new_action_category_scope2[new_action_category_scope2_uuid] = "dev"
+            action_category_scope = self.admin_manager.set_action_category_scope_dict(
+                "admin",
+                self.ref["id"],
+                action_category,
+                new_action_category_scope2)
+            self.assertIsInstance(action_category_scope, dict)
+            self.assertIn("action_category_scope", action_category_scope)
+            self.assertIn("id", action_category_scope)
+            self.assertIn("intra_extension_uuid", action_category_scope)
+            self.assertEqual(self.ref["id"], action_category_scope["intra_extension_uuid"])
+            self.assertIn(new_action_category_scope2[new_action_category_scope2_uuid],
+                          action_category_scope["action_category_scope"][action_category].values())
+
+            action_category_assignments = self.manager.get_action_category_assignment_dict(
+                "admin",
+                self.ref["id"],
+                new_action["id"]
+            )
+            self.assertIsInstance(action_category_assignments, dict)
+            self.assertIn("action_category_assignments", action_category_assignments)
+            self.assertIn("id", action_category_assignments)
+            self.assertIn("intra_extension_uuid", action_category_assignments)
+            self.assertEqual(self.ref["id"], action_category_assignments["intra_extension_uuid"])
+            self.assertEqual({}, action_category_assignments["action_category_assignments"][new_action["id"]])
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.set_action_category_assignment_dict,
+                "admin", self.ref["id"], new_action["id"],
+                {
+                    new_action_category_uuid: [new_action_category_scope_uuid, new_action_category_scope2_uuid],
+                })
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.del_action_category_assignment,
+                "admin", self.ref["id"], new_action["id"],
+                new_action_category_uuid,
+                new_action_category_scope_uuid)
+
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.add_action_category_assignment_dict,
+                "admin", self.ref["id"], new_action["id"],
+                new_action_category_uuid,
+                new_action_category_scope_uuid)
+
+    def test_sub_meta_rules(self):
+        self.create_intra_extension()
+
+        aggregation_algorithms = self.manager.get_aggregation_algorithms("admin", self.ref["id"])
+        self.assertIsInstance(aggregation_algorithms, dict)
+        self.assertIsInstance(aggregation_algorithms["aggregation_algorithms"], list)
+        self.assertIn("and_true_aggregation", aggregation_algorithms["aggregation_algorithms"])
+        self.assertIn("test_aggregation", aggregation_algorithms["aggregation_algorithms"])
+
+        aggregation_algorithm = self.manager.get_aggregation_algorithm("admin", self.ref["id"])
+        self.assertIsInstance(aggregation_algorithm, dict)
+        self.assertIn("aggregation", aggregation_algorithm)
+        self.assertIn(aggregation_algorithm["aggregation"], aggregation_algorithms["aggregation_algorithms"])
+
+        _aggregation_algorithm = list(aggregation_algorithms["aggregation_algorithms"])
+        _aggregation_algorithm.remove(aggregation_algorithm["aggregation"])
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.set_aggregation_algorithm,
+            "admin", self.ref["id"], _aggregation_algorithm[0])
+
+        sub_meta_rules = self.manager.get_sub_meta_rule("admin", self.ref["id"])
+        self.assertIsInstance(sub_meta_rules, dict)
+        self.assertIn("sub_meta_rules", sub_meta_rules)
+        with open(os.path.join(self.policy_directory, self.ref["model"], "metarule.json")) as conf_file:
+            sub_meta_rules_conf = json.load(conf_file)
+        metarule = dict()
+        categories = {
+            "subject_categories": self.manager.get_subject_category_dict("admin", self.ref["id"]),
+            "object_categories": self.manager.get_object_category_dict("admin", self.ref["id"]),
+            "action_categories": self.manager.get_action_category_dict("admin", self.ref["id"])
+        }
+        for relation in sub_meta_rules_conf["sub_meta_rules"]:
+            metarule[relation] = dict()
+            for item in ("subject_categories", "object_categories", "action_categories"):
+                metarule[relation][item] = list()
+                for element in sub_meta_rules_conf["sub_meta_rules"][relation][item]:
+                    metarule[relation][item].append(self.__get_key_from_value(
+                        element,
+                        categories[item][item]
+                    ))
+
+        for relation in sub_meta_rules["sub_meta_rules"]:
+            self.assertIn(relation, metarule)
+            for item in ("subject_categories", "object_categories", "action_categories"):
+                self.assertEqual(
+                    sub_meta_rules["sub_meta_rules"][relation][item],
+                    metarule[relation][item]
+                )
+
+            new_subject_category = {"id": uuid.uuid4().hex, "name": "subject_category_test"}
+            # Add a particular subject_category
+            data = self.admin_manager.add_subject_category_dict(
+                "admin",
+                self.ref["id"],
+                new_subject_category["name"])
+            new_subject_category["id"] = data["subject_category"]["uuid"]
+            subject_categories = self.manager.get_subject_category_dict(
+                "admin",
+                self.ref["id"])
+            self.assertIsInstance(subject_categories, dict)
+            self.assertIn("subject_categories", subject_categories)
+            self.assertIn("id", subject_categories)
+            self.assertIn("intra_extension_uuid", subject_categories)
+            self.assertEqual(self.ref["id"], subject_categories["intra_extension_uuid"])
+            self.assertIn(new_subject_category["id"], subject_categories["subject_categories"])
+            metarule[relation]["subject_categories"].append(new_subject_category["id"])
+            self.assertRaises(
+                AuthIntraExtensionModificationNotAuthorized,
+                self.manager.set_sub_meta_rule,
+                "admin", self.ref["id"], metarule)
+
+    def test_sub_rules(self):
+        self.create_intra_extension()
+
+        sub_meta_rules = self.manager.get_sub_meta_rule("admin", self.ref["id"])
+        self.assertIsInstance(sub_meta_rules, dict)
+        self.assertIn("sub_meta_rules", sub_meta_rules)
+
+        sub_rules = self.manager.get_sub_rules("admin", self.ref["id"])
+        self.assertIsInstance(sub_rules, dict)
+        self.assertIn("rules", sub_rules)
+        rules = dict()
+        for relation in sub_rules["rules"]:
+            self.assertIn(relation, self.manager.get_sub_meta_rule_relations("admin", self.ref["id"])["sub_meta_rule_relations"])
+            rules[relation] = list()
+            for rule in sub_rules["rules"][relation]:
+                for cat, cat_func, func_name in (
+                    ("subject_categories", self.manager.get_subject_category_scope_dict, "subject_category_scope"),
+                    ("action_categories", self.manager.get_action_category_scope_dict, "action_category_scope"),
+                    ("object_categories", self.manager.get_object_category_scope_dict, "object_category_scope"),
+                ):
+                    for cat_value in sub_meta_rules["sub_meta_rules"][relation][cat]:
+                        scope = cat_func(
+                            "admin",
+                            self.ref["id"],
+                            cat_value
+                        )
+                        a_scope = rule.pop(0)
+                        self.assertIn(a_scope, scope[func_name][cat_value])
+
+        # add a new subrule
+
+        relation = list(sub_rules["rules"])[0]
+        sub_rule = []
+        for cat, cat_func, func_name in (
+            ("subject_categories", self.manager.get_subject_category_scope_dict, "subject_category_scope"),
+            ("action_categories", self.manager.get_action_category_scope_dict, "action_category_scope"),
+            ("object_categories", self.manager.get_object_category_scope_dict, "object_category_scope"),
+        ):
+            for cat_value in sub_meta_rules["sub_meta_rules"][relation][cat]:
+                scope = cat_func(
+                    "admin",
+                    self.ref["id"],
+                    cat_value
+                )
+                sub_rule.append(list(scope[func_name][cat_value])[0])
+
+        self.assertRaises(
+            AuthIntraExtensionModificationNotAuthorized,
+            self.manager.set_sub_rule,
+            "admin", self.ref["id"], relation, sub_rule)
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py
new file mode 100644 (file)
index 0000000..1b678d5
--- /dev/null
@@ -0,0 +1,4 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py
new file mode 100644 (file)
index 0000000..d9c17bd
--- /dev/null
@@ -0,0 +1,162 @@
+# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+# This software is distributed under the terms and conditions of the 'Apache-2.0'
+# license which can be found in the file 'LICENSE' in this package distribution
+# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
+
+"""Unit tests for core tenant."""
+
+import uuid
+from oslo_config import cfg
+from keystone.tests import unit as tests
+from keystone.contrib.moon.core import TenantManager
+from keystone.tests.unit.ksfixtures import database
+from keystone.contrib.moon.exception import *
+from keystone.tests.unit import default_fixtures
+from keystone.contrib.moon.core import LogManager
+
+CONF = cfg.CONF
+
+
+class TestTenantManager(tests.TestCase):
+
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(TestTenantManager, self).setUp()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        self.manager = TenantManager()
+
+    def load_extra_backends(self):
+        return {
+            "moonlog_api": LogManager()
+        }
+
+    def config_overrides(self):
+        super(TestTenantManager, self).config_overrides()
+        self.config_fixture.config(
+            group='moon',
+            tenant_driver='keystone.contrib.moon.backends.sql.TenantConnector')
+
+    def test_add_tenant(self):
+        _uuid = uuid.uuid4().hex
+        new_mapping = {
+            _uuid: {
+                "name": uuid.uuid4().hex,
+                "authz": uuid.uuid4().hex,
+                "admin": uuid.uuid4().hex,
+            }
+        }
+        data = self.manager.set_tenant_dict(
+            tenant_uuid=_uuid,
+            name=new_mapping[_uuid]["name"],
+            authz_extension_uuid=new_mapping[_uuid]["authz"],
+            admin_extension_uuid=new_mapping[_uuid]["admin"]
+        )
+        self.assertEqual(_uuid, data["id"])
+        self.assertEqual(data["name"], new_mapping[_uuid]["name"])
+        self.assertEqual(data["authz"], new_mapping[_uuid]["authz"])
+        self.assertEqual(data["admin"], new_mapping[_uuid]["admin"])
+        data = self.manager.get_tenant_dict()
+        self.assertNotEqual(data, {})
+        data = self.manager.get_tenant_uuid(new_mapping[_uuid]["authz"])
+        self.assertEqual(_uuid, data)
+        data = self.manager.get_tenant_uuid(new_mapping[_uuid]["admin"])
+        self.assertEqual(_uuid, data)
+        data = self.manager.get_admin_extension_uuid(new_mapping[_uuid]["authz"])
+        self.assertEqual(new_mapping[_uuid]["admin"], data)
+
+    def test_tenant_list_empty(self):
+        data = self.manager.get_tenant_dict()
+        self.assertEqual(data, {})
+
+    def test_set_tenant_name(self):
+        _uuid = uuid.uuid4().hex
+        new_mapping = {
+            _uuid: {
+                "name": uuid.uuid4().hex,
+                "authz": uuid.uuid4().hex,
+                "admin": uuid.uuid4().hex,
+            }
+        }
+        data = self.manager.set_tenant_dict(
+            tenant_uuid=_uuid,
+            name=new_mapping[_uuid]["name"],
+            authz_extension_uuid=new_mapping[_uuid]["authz"],
+            admin_extension_uuid=new_mapping[_uuid]["admin"]
+        )
+        self.assertEqual(_uuid, data["id"])
+        self.assertEqual(data["name"], new_mapping[_uuid]["name"])
+        data = self.manager.set_tenant_name(_uuid, "new name")
+        self.assertEqual(_uuid, data["id"])
+        self.assertEqual(data["name"], "new name")
+        data = self.manager.get_tenant_name(_uuid)
+        self.assertEqual(data, "new name")
+
+    def test_delete_tenant(self):
+        _uuid = uuid.uuid4().hex
+        new_mapping = {
+            _uuid: {
+                "name": uuid.uuid4().hex,
+                "authz": uuid.uuid4().hex,
+                "admin": uuid.uuid4().hex,
+            }
+        }
+        data = self.manager.set_tenant_dict(
+            tenant_uuid=_uuid,
+            name=new_mapping[_uuid]["name"],
+            authz_extension_uuid=new_mapping[_uuid]["authz"],
+            admin_extension_uuid=new_mapping[_uuid]["admin"]
+        )
+        self.assertEqual(_uuid, data["id"])
+        self.assertEqual(data["name"], new_mapping[_uuid]["name"])
+        self.assertEqual(data["authz"], new_mapping[_uuid]["authz"])
+        self.assertEqual(data["admin"], new_mapping[_uuid]["admin"])
+        data = self.manager.get_tenant_dict()
+        self.assertNotEqual(data, {})
+        self.manager.delete(new_mapping[_uuid]["authz"])
+        data = self.manager.get_tenant_dict()
+        self.assertEqual(data, {})
+
+    def test_get_extension_uuid(self):
+        _uuid = uuid.uuid4().hex
+        new_mapping = {
+            _uuid: {
+                "name": uuid.uuid4().hex,
+                "authz": uuid.uuid4().hex,
+                "admin": uuid.uuid4().hex,
+            }
+        }
+        data = self.manager.set_tenant_dict(
+            tenant_uuid=_uuid,
+            name=new_mapping[_uuid]["name"],
+            authz_extension_uuid=new_mapping[_uuid]["authz"],
+            admin_extension_uuid=new_mapping[_uuid]["admin"]
+        )
+        self.assertEqual(_uuid, data["id"])
+        data = self.manager.get_extension_uuid(_uuid)
+        self.assertEqual(data, new_mapping[_uuid]["authz"])
+        data = self.manager.get_extension_uuid(_uuid, "admin")
+        self.assertEqual(data, new_mapping[_uuid]["admin"])
+
+    def test_unknown_tenant_uuid(self):
+        self.assertRaises(TenantNotFoundError, self.manager.get_tenant_name, uuid.uuid4().hex)
+        self.assertRaises(TenantNotFoundError, self.manager.set_tenant_name, uuid.uuid4().hex, "new name")
+        self.assertRaises(TenantNotFoundError, self.manager.get_extension_uuid, uuid.uuid4().hex)
+        _uuid = uuid.uuid4().hex
+        new_mapping = {
+            _uuid: {
+                "name": uuid.uuid4().hex,
+                "authz": uuid.uuid4().hex,
+                "admin": uuid.uuid4().hex,
+            }
+        }
+        data = self.manager.set_tenant_dict(
+            tenant_uuid=_uuid,
+            name=new_mapping[_uuid]["name"],
+            authz_extension_uuid=new_mapping[_uuid]["authz"],
+            admin_extension_uuid=""
+        )
+        self.assertEqual(_uuid, data["id"])
+        self.assertRaises(IntraExtensionNotFound, self.manager.get_extension_uuid, _uuid, "admin")
+        self.assertRaises(TenantNotFoundError, self.manager.get_tenant_uuid, uuid.uuid4().hex)
+        # self.assertRaises(AdminIntraExtensionNotFound, self.manager.get_admin_extension_uuid, uuid.uuid4().hex)
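Taken together, the assertions in TestTenantManager pin down the shape of the tenant mapping. A condensed usage sketch, inferred only from what these tests assert (manager is a TenantManager configured as in setUp; the name "demo" is illustrative):

import uuid

tenant_id = uuid.uuid4().hex
mapping = manager.set_tenant_dict(
    tenant_uuid=tenant_id,
    name="demo",
    authz_extension_uuid=uuid.uuid4().hex,
    admin_extension_uuid=uuid.uuid4().hex,
)
# set_tenant_dict echoes the stored record back
assert mapping["id"] == tenant_id
assert set(mapping) >= {"id", "name", "authz", "admin"}
# either intra-extension UUID resolves back to the owning tenant
assert manager.get_tenant_uuid(mapping["authz"]) == tenant_id
assert manager.get_tenant_uuid(mapping["admin"]) == tenant_id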
diff --git a/keystone-moon/keystone/tests/unit/__init__.py b/keystone-moon/keystone/tests/unit/__init__.py
new file mode 100644 (file)
index 0000000..c97ce25
--- /dev/null
@@ -0,0 +1,41 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import oslo_i18n
+import six
+
+
+if six.PY3:
+    # NOTE(dstanek): This block will monkey patch libraries that are not
+    # yet supported in Python 3. We do this so that it is possible to
+    # execute any tests at all. Without monkey patching these modules the
+    # tests will fail with import errors.
+
+    import sys
+    from unittest import mock  # noqa: needed here only under Python 3
+
+    sys.modules['eventlet'] = mock.Mock()
+    sys.modules['eventlet.green'] = mock.Mock()
+    sys.modules['eventlet.wsgi'] = mock.Mock()
+    sys.modules['oslo'].messaging = mock.Mock()
+    sys.modules['pycadf'] = mock.Mock()
+    sys.modules['paste'] = mock.Mock()
+
+# NOTE(dstanek): oslo_i18n.enable_lazy() must be called before
+# keystone.i18n._() is called to ensure it has the desired lazy lookup
+# behavior. This includes cases, like keystone.exceptions, where
+# keystone.i18n._() is called at import time.
+oslo_i18n.enable_lazy()
+
+from keystone.tests.unit.core import *  # noqa
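The NOTE in this file relies on a standard property of Python's import machinery: a name already present in sys.modules is returned as-is by a later import statement, so registering a Mock up front suppresses the ImportError. A self-contained sketch of the technique (the module name is made up):

import sys
from unittest import mock  # Python 3; Python 2 would use the external mock package

# Register a Mock under the unavailable module's name before anything imports it.
sys.modules['some_py2_only_lib'] = mock.Mock()

import some_py2_only_lib  # no ImportError: resolves to the Mock registered above

some_py2_only_lib.do_something()  # attribute access and calls are recorded
assert some_py2_only_lib.do_something.called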
diff --git a/keystone-moon/keystone/tests/unit/backend/__init__.py b/keystone-moon/keystone/tests/unit/backend/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/unit/backend/core_ldap.py b/keystone-moon/keystone/tests/unit/backend/core_ldap.py
new file mode 100644 (file)
index 0000000..9d6b23e
--- /dev/null
@@ -0,0 +1,161 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ldap
+
+from oslo_config import cfg
+
+from keystone.common import cache
+from keystone.common import ldap as common_ldap
+from keystone.common.ldap import core as common_ldap_core
+from keystone.common import sql
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit import fakeldap
+from keystone.tests.unit.ksfixtures import database
+
+
+CONF = cfg.CONF
+
+
+def create_group_container(identity_api):
+    # Create the groups base entry (ou=Groups,cn=example,cn=com)
+    group_api = identity_api.driver.group
+    conn = group_api.get_connection()
+    dn = 'ou=Groups,cn=example,cn=com'
+    conn.add_s(dn, [('objectclass', ['organizationalUnit']),
+                    ('ou', ['Groups'])])
+
+
+class BaseBackendLdapCommon(object):
+    """Mixin class to set up generic LDAP backends."""
+
+    def setUp(self):
+        super(BaseBackendLdapCommon, self).setUp()
+
+        common_ldap.register_handler('fake://', fakeldap.FakeLdap)
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        self.addCleanup(common_ldap_core._HANDLERS.clear)
+        self.addCleanup(self.clear_database)
+
+    def _get_domain_fixture(self):
+        """Domains in LDAP are read-only, so just return the static one."""
+        return self.resource_api.get_domain(CONF.identity.default_domain_id)
+
+    def clear_database(self):
+        for shelf in fakeldap.FakeShelves:
+            fakeldap.FakeShelves[shelf].clear()
+
+    def reload_backends(self, domain_id):
+        # Only one backend unless we are using separate domain backends
+        self.load_backends()
+
+    def get_config(self, domain_id):
+        # Only one conf structure unless we are using separate domain backends
+        return CONF
+
+    def config_overrides(self):
+        super(BaseBackendLdapCommon, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+
+    def config_files(self):
+        config_files = super(BaseBackendLdapCommon, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
+        return config_files
+
+    def get_user_enabled_vals(self, user):
+        user_dn = (
+            self.identity_api.driver.user._id_to_dn_string(user['id']))
+        enabled_attr_name = CONF.ldap.user_enabled_attribute
+
+        ldap_ = self.identity_api.driver.user.get_connection()
+        res = ldap_.search_s(user_dn,
+                             ldap.SCOPE_BASE,
+                             u'(sn=%s)' % user['name'])
+        if enabled_attr_name in res[0][1]:
+            return res[0][1][enabled_attr_name]
+        else:
+            return None
+
+
+class BaseBackendLdap(object):
+    """Mixin class to set up an all-LDAP configuration."""
+    def setUp(self):
+        # NOTE(dstanek): The database must be setup prior to calling the
+        # parent's setUp. The parent's setUp uses services (like
+        # credentials) that require a database.
+        self.useFixture(database.Database())
+        super(BaseBackendLdap, self).setUp()
+
+    def load_fixtures(self, fixtures):
+        # Override super impl since need to create group container.
+        create_group_container(self.identity_api)
+        super(BaseBackendLdap, self).load_fixtures(fixtures)
+
+
+class BaseBackendLdapIdentitySqlEverythingElse(tests.SQLDriverOverrides):
+    """Mixin base for Identity LDAP, everything else SQL backend tests."""
+
+    def config_files(self):
+        config_files = super(BaseBackendLdapIdentitySqlEverythingElse,
+                             self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap_sql.conf'))
+        return config_files
+
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(BaseBackendLdapIdentitySqlEverythingElse, self).setUp()
+        self.clear_database()
+        self.load_backends()
+        cache.configure_cache_region(cache.REGION)
+        self.engine = sql.get_engine()
+        self.addCleanup(sql.cleanup)
+
+        sql.ModelBase.metadata.create_all(bind=self.engine)
+        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+
+        self.load_fixtures(default_fixtures)
+        # defaulted by the data load
+        self.user_foo['enabled'] = True
+
+    def config_overrides(self):
+        super(BaseBackendLdapIdentitySqlEverythingElse,
+              self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+        self.config_fixture.config(
+            group='resource',
+            driver='keystone.resource.backends.sql.Resource')
+        self.config_fixture.config(
+            group='assignment',
+            driver='keystone.assignment.backends.sql.Assignment')
+
+
+class BaseBackendLdapIdentitySqlEverythingElseWithMapping(object):
+    """Mixin base class to test mapping of default LDAP backend.
+
+    The default configuration is not to enable mapping when using a single
+    backend LDAP driver.  However, a cloud provider might want to enable
+    the mapping, hence hiding the LDAP IDs from any clients of keystone.
+    Setting backward_compatible_ids to False will enable this mapping.
+
+    """
+    def config_overrides(self):
+        super(BaseBackendLdapIdentitySqlEverythingElseWithMapping,
+              self).config_overrides()
+        self.config_fixture.config(group='identity_mapping',
+                                   backward_compatible_ids=False)
diff --git a/keystone-moon/keystone/tests/unit/backend/core_sql.py b/keystone-moon/keystone/tests/unit/backend/core_sql.py
new file mode 100644 (file)
index 0000000..9cbd858
--- /dev/null
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy
+
+from keystone.common import sql
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+
+
+class BaseBackendSqlTests(tests.SQLDriverOverrides, tests.TestCase):
+
+    def setUp(self):
+        super(BaseBackendSqlTests, self).setUp()
+        self.useFixture(database.Database())
+        self.load_backends()
+
+        # populate the engine with tables & fixtures
+        self.load_fixtures(default_fixtures)
+        # defaulted by the data load
+        self.user_foo['enabled'] = True
+
+    def config_files(self):
+        config_files = super(BaseBackendSqlTests, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+        return config_files
+
+
+class BaseBackendSqlModels(BaseBackendSqlTests):
+
+    def select_table(self, name):
+        table = sqlalchemy.Table(name,
+                                 sql.ModelBase.metadata,
+                                 autoload=True)
+        s = sqlalchemy.select([table])
+        return s
+
+    def assertExpectedSchema(self, table, cols):
+        table = self.select_table(table)
+        for col, type_, length in cols:
+            self.assertIsInstance(table.c[col].type, type_)
+            if length:
+                self.assertEqual(length, table.c[col].type.length)
diff --git a/keystone-moon/keystone/tests/unit/backend/domain_config/__init__.py b/keystone-moon/keystone/tests/unit/backend/domain_config/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/unit/backend/domain_config/core.py b/keystone-moon/keystone/tests/unit/backend/domain_config/core.py
new file mode 100644 (file)
index 0000000..da2e9bd
--- /dev/null
@@ -0,0 +1,523 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+import mock
+from testtools import matchers
+
+from keystone import exception
+
+
+class DomainConfigTests(object):
+
+    def setUp(self):
+        self.domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(self.domain['id'], self.domain)
+        self.addCleanup(self.clean_up_domain)
+
+    def clean_up_domain(self):
+        # NOTE(henry-nash): Deleting the domain will also delete any domain
+        # configs for this domain.
+        self.domain['enabled'] = False
+        self.resource_api.update_domain(self.domain['id'], self.domain)
+        self.resource_api.delete_domain(self.domain['id'])
+        del self.domain
+
+    def _domain_config_crud(self, sensitive):
+        group = uuid.uuid4().hex
+        option = uuid.uuid4().hex
+        value = uuid.uuid4().hex
+        self.domain_config_api.create_config_option(
+            self.domain['id'], group, option, value, sensitive)
+        res = self.domain_config_api.get_config_option(
+            self.domain['id'], group, option, sensitive)
+        config = {'group': group, 'option': option, 'value': value}
+        self.assertEqual(config, res)
+
+        value = uuid.uuid4().hex
+        self.domain_config_api.update_config_option(
+            self.domain['id'], group, option, value, sensitive)
+        res = self.domain_config_api.get_config_option(
+            self.domain['id'], group, option, sensitive)
+        config = {'group': group, 'option': option, 'value': value}
+        self.assertEqual(config, res)
+
+        self.domain_config_api.delete_config_options(
+            self.domain['id'], group, option, sensitive)
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.get_config_option,
+                          self.domain['id'], group, option, sensitive)
+        # ...and silent if we try to delete it again
+        self.domain_config_api.delete_config_options(
+            self.domain['id'], group, option, sensitive)
+
+    def test_whitelisted_domain_config_crud(self):
+        self._domain_config_crud(sensitive=False)
+
+    def test_sensitive_domain_config_crud(self):
+        self._domain_config_crud(sensitive=True)
+
+    def _list_domain_config(self, sensitive):
+        """Test listing by combination of domain, group & option."""
+
+        config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+                   'value': uuid.uuid4().hex}
+        # Put config2 in the same group as config1
+        config2 = {'group': config1['group'], 'option': uuid.uuid4().hex,
+                   'value': uuid.uuid4().hex}
+        config3 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+                   'value': 100}
+        for config in [config1, config2, config3]:
+            self.domain_config_api.create_config_option(
+                self.domain['id'], config['group'], config['option'],
+                config['value'], sensitive)
+
+        # Try listing all items from a domain
+        res = self.domain_config_api.list_config_options(
+            self.domain['id'], sensitive=sensitive)
+        self.assertThat(res, matchers.HasLength(3))
+        for res_entry in res:
+            self.assertIn(res_entry, [config1, config2, config3])
+
+        # Try listing by domain and group
+        res = self.domain_config_api.list_config_options(
+            self.domain['id'], group=config1['group'], sensitive=sensitive)
+        self.assertThat(res, matchers.HasLength(2))
+        for res_entry in res:
+            self.assertIn(res_entry, [config1, config2])
+
+        # Try listing by domain, group and option
+        res = self.domain_config_api.list_config_options(
+            self.domain['id'], group=config2['group'],
+            option=config2['option'], sensitive=sensitive)
+        self.assertThat(res, matchers.HasLength(1))
+        self.assertEqual(config2, res[0])
+
+    def test_list_whitelisted_domain_config_crud(self):
+        self._list_domain_config(False)
+
+    def test_list_sensitive_domain_config_crud(self):
+        self._list_domain_config(True)
+
+    def _delete_domain_configs(self, sensitive):
+        """Test deleting by combination of domain, group & option."""
+
+        config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+                   'value': uuid.uuid4().hex}
+        # Put config2 and config3 in the same group as config1
+        config2 = {'group': config1['group'], 'option': uuid.uuid4().hex,
+                   'value': uuid.uuid4().hex}
+        config3 = {'group': config1['group'], 'option': uuid.uuid4().hex,
+                   'value': uuid.uuid4().hex}
+        config4 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+                   'value': uuid.uuid4().hex}
+        for config in [config1, config2, config3, config4]:
+            self.domain_config_api.create_config_option(
+                self.domain['id'], config['group'], config['option'],
+                config['value'], sensitive)
+
+        # Try deleting by domain, group and option
+        res = self.domain_config_api.delete_config_options(
+            self.domain['id'], group=config2['group'],
+            option=config2['option'], sensitive=sensitive)
+        res = self.domain_config_api.list_config_options(
+            self.domain['id'], sensitive=sensitive)
+        self.assertThat(res, matchers.HasLength(3))
+        for res_entry in res:
+            self.assertIn(res_entry, [config1, config3, config4])
+
+        # Try deleting by domain and group
+        res = self.domain_config_api.delete_config_options(
+            self.domain['id'], group=config4['group'], sensitive=sensitive)
+        res = self.domain_config_api.list_config_options(
+            self.domain['id'], sensitive=sensitive)
+        self.assertThat(res, matchers.HasLength(2))
+        for res_entry in res:
+            self.assertIn(res_entry, [config1, config3])
+
+        # Try deleting all items from a domain
+        res = self.domain_config_api.delete_config_options(
+            self.domain['id'], sensitive=sensitive)
+        res = self.domain_config_api.list_config_options(
+            self.domain['id'], sensitive=sensitive)
+        self.assertThat(res, matchers.HasLength(0))
+
+    def test_delete_whitelisted_domain_configs(self):
+        self._delete_domain_configs(False)
+
+    def test_delete_sensitive_domain_configs(self):
+        self._delete_domain_configs(True)
+
+    def _create_domain_config_twice(self, sensitive):
+        """Test conflict error thrown if create the same option twice."""
+
+        config = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+                  'value': uuid.uuid4().hex}
+
+        self.domain_config_api.create_config_option(
+            self.domain['id'], config['group'], config['option'],
+            config['value'], sensitive=sensitive)
+        self.assertRaises(exception.Conflict,
+                          self.domain_config_api.create_config_option,
+                          self.domain['id'], config['group'], config['option'],
+                          config['value'], sensitive=sensitive)
+
+    def test_create_whitelisted_domain_config_twice(self):
+        self._create_domain_config_twice(False)
+
+    def test_create_sensitive_domain_config_twice(self):
+        self._create_domain_config_twice(True)
+
+    def test_delete_domain_deletes_configs(self):
+        """Test domain deletion clears the domain configs."""
+
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+                   'value': uuid.uuid4().hex}
+        # Put config2 in the same group as config1
+        config2 = {'group': config1['group'], 'option': uuid.uuid4().hex,
+                   'value': uuid.uuid4().hex}
+        self.domain_config_api.create_config_option(
+            domain['id'], config1['group'], config1['option'],
+            config1['value'])
+        self.domain_config_api.create_config_option(
+            domain['id'], config2['group'], config2['option'],
+            config2['value'], sensitive=True)
+        res = self.domain_config_api.list_config_options(
+            domain['id'])
+        self.assertThat(res, matchers.HasLength(1))
+        res = self.domain_config_api.list_config_options(
+            domain['id'], sensitive=True)
+        self.assertThat(res, matchers.HasLength(1))
+
+        # Now delete the domain
+        domain['enabled'] = False
+        self.resource_api.update_domain(domain['id'], domain)
+        self.resource_api.delete_domain(domain['id'])
+
+        # Check domain configs have also been deleted
+        res = self.domain_config_api.list_config_options(
+            domain['id'])
+        self.assertThat(res, matchers.HasLength(0))
+        res = self.domain_config_api.list_config_options(
+            domain['id'], sensitive=True)
+        self.assertThat(res, matchers.HasLength(0))
+
+    def test_create_domain_config_including_sensitive_option(self):
+        config = {'ldap': {'url': uuid.uuid4().hex,
+                           'user_tree_dn': uuid.uuid4().hex,
+                           'password': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+
+        # password is sensitive, so check that the whitelisted portion and
+        # the sensitive piece have been stored in the appropriate locations.
+        res = self.domain_config_api.get_config(self.domain['id'])
+        config_whitelisted = copy.deepcopy(config)
+        config_whitelisted['ldap'].pop('password')
+        self.assertEqual(config_whitelisted, res)
+        res = self.domain_config_api.get_config_option(
+            self.domain['id'], 'ldap', 'password', sensitive=True)
+        self.assertEqual(config['ldap']['password'], res['value'])
+
+        # Finally, use the non-public API to get back the whole config
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domain['id'])
+        self.assertEqual(config, res)
+
+    def test_get_partial_domain_config(self):
+        config = {'ldap': {'url': uuid.uuid4().hex,
+                           'user_tree_dn': uuid.uuid4().hex,
+                           'password': uuid.uuid4().hex},
+                  'identity': {'driver': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+
+        res = self.domain_config_api.get_config(self.domain['id'],
+                                                group='identity')
+        config_partial = copy.deepcopy(config)
+        config_partial.pop('ldap')
+        self.assertEqual(config_partial, res)
+        res = self.domain_config_api.get_config(
+            self.domain['id'], group='ldap', option='user_tree_dn')
+        self.assertEqual({'user_tree_dn': config['ldap']['user_tree_dn']}, res)
+        # ...but we should fail to get a sensitive option
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.get_config, self.domain['id'],
+                          group='ldap', option='password')
+
+    def test_delete_partial_domain_config(self):
+        config = {'ldap': {'url': uuid.uuid4().hex,
+                           'user_tree_dn': uuid.uuid4().hex,
+                           'password': uuid.uuid4().hex},
+                  'identity': {'driver': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+
+        self.domain_config_api.delete_config(
+            self.domain['id'], group='identity')
+        config_partial = copy.deepcopy(config)
+        config_partial.pop('identity')
+        config_partial['ldap'].pop('password')
+        res = self.domain_config_api.get_config(self.domain['id'])
+        self.assertEqual(config_partial, res)
+
+        self.domain_config_api.delete_config(
+            self.domain['id'], group='ldap', option='url')
+        config_partial = copy.deepcopy(config_partial)
+        config_partial['ldap'].pop('url')
+        res = self.domain_config_api.get_config(self.domain['id'])
+        self.assertEqual(config_partial, res)
+
+    def test_get_options_not_in_domain_config(self):
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.get_config, self.domain['id'])
+        config = {'ldap': {'url': uuid.uuid4().hex}}
+
+        self.domain_config_api.create_config(self.domain['id'], config)
+
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.get_config, self.domain['id'],
+                          group='identity')
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.get_config, self.domain['id'],
+                          group='ldap', option='user_tree_dn')
+
+    def test_get_sensitive_config(self):
+        config = {'ldap': {'url': uuid.uuid4().hex,
+                           'user_tree_dn': uuid.uuid4().hex,
+                           'password': uuid.uuid4().hex},
+                  'identity': {'driver': uuid.uuid4().hex}}
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domain['id'])
+        self.assertEqual({}, res)
+        self.domain_config_api.create_config(self.domain['id'], config)
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domain['id'])
+        self.assertEqual(config, res)
+
+    def test_update_partial_domain_config(self):
+        config = {'ldap': {'url': uuid.uuid4().hex,
+                           'user_tree_dn': uuid.uuid4().hex,
+                           'password': uuid.uuid4().hex},
+                  'identity': {'driver': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+
+        # Try updating a group
+        new_config = {'ldap': {'url': uuid.uuid4().hex,
+                               'user_filter': uuid.uuid4().hex}}
+        res = self.domain_config_api.update_config(
+            self.domain['id'], new_config, group='ldap')
+        expected_config = copy.deepcopy(config)
+        expected_config['ldap']['url'] = new_config['ldap']['url']
+        expected_config['ldap']['user_filter'] = (
+            new_config['ldap']['user_filter'])
+        expected_full_config = copy.deepcopy(expected_config)
+        expected_config['ldap'].pop('password')
+        res = self.domain_config_api.get_config(self.domain['id'])
+        self.assertEqual(expected_config, res)
+        # The sensitive option should still exist
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domain['id'])
+        self.assertEqual(expected_full_config, res)
+
+        # Try updating a single whitelisted option
+        self.domain_config_api.delete_config(self.domain['id'])
+        self.domain_config_api.create_config(self.domain['id'], config)
+        new_config = {'url': uuid.uuid4().hex}
+        res = self.domain_config_api.update_config(
+            self.domain['id'], new_config, group='ldap', option='url')
+
+        # Make sure whitelisted and full config is updated
+        expected_whitelisted_config = copy.deepcopy(config)
+        expected_whitelisted_config['ldap']['url'] = new_config['url']
+        expected_full_config = copy.deepcopy(expected_whitelisted_config)
+        expected_whitelisted_config['ldap'].pop('password')
+        self.assertEqual(expected_whitelisted_config, res)
+        res = self.domain_config_api.get_config(self.domain['id'])
+        self.assertEqual(expected_whitelisted_config, res)
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domain['id'])
+        self.assertEqual(expected_full_config, res)
+
+        # Try updating a single sensitive option
+        self.domain_config_api.delete_config(self.domain['id'])
+        self.domain_config_api.create_config(self.domain['id'], config)
+        new_config = {'password': uuid.uuid4().hex}
+        res = self.domain_config_api.update_config(
+            self.domain['id'], new_config, group='ldap', option='password')
+        # The whitelisted config should not have changed...
+        expected_whitelisted_config = copy.deepcopy(config)
+        expected_full_config = copy.deepcopy(config)
+        expected_whitelisted_config['ldap'].pop('password')
+        self.assertEqual(expected_whitelisted_config, res)
+        res = self.domain_config_api.get_config(self.domain['id'])
+        self.assertEqual(expected_whitelisted_config, res)
+        expected_full_config['ldap']['password'] = new_config['password']
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domain['id'])
+        # ...but the sensitive piece should have.
+        self.assertEqual(expected_full_config, res)
+
+    def test_update_invalid_partial_domain_config(self):
+        config = {'ldap': {'url': uuid.uuid4().hex,
+                           'user_tree_dn': uuid.uuid4().hex,
+                           'password': uuid.uuid4().hex},
+                  'identity': {'driver': uuid.uuid4().hex}}
+        # An extra group, when specifying one group, should fail
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.update_config,
+                          self.domain['id'], config, group='ldap')
+        # An extra option, when specifying one option, should fail
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.update_config,
+                          self.domain['id'], config['ldap'],
+                          group='ldap', option='url')
+
+        # Now try the right number of groups/options, but just not
+        # ones that are in the config provided
+        config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}}
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.update_config,
+                          self.domain['id'], config, group='identity')
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.update_config,
+                          self.domain['id'], config['ldap'], group='ldap',
+                          option='url')
+
+        # Now some valid groups/options, but just not ones that are in the
+        # existing config
+        config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+        config_wrong_group = {'identity': {'driver': uuid.uuid4().hex}}
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.update_config,
+                          self.domain['id'], config_wrong_group,
+                          group='identity')
+        config_wrong_option = {'url': uuid.uuid4().hex}
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.update_config,
+                          self.domain['id'], config_wrong_option,
+                          group='ldap', option='url')
+
+        # And finally just some bad groups/options
+        bad_group = uuid.uuid4().hex
+        config = {bad_group: {'user': uuid.uuid4().hex}}
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.update_config,
+                          self.domain['id'], config, group=bad_group,
+                          option='user')
+        bad_option = uuid.uuid4().hex
+        config = {'ldap': {bad_option: uuid.uuid4().hex}}
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.update_config,
+                          self.domain['id'], config, group='ldap',
+                          option=bad_option)
+
+    def test_create_invalid_domain_config(self):
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.create_config,
+                          self.domain['id'], {})
+        config = {uuid.uuid4().hex: uuid.uuid4().hex}
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.create_config,
+                          self.domain['id'], config)
+        config = {uuid.uuid4().hex: {uuid.uuid4().hex: uuid.uuid4().hex}}
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.create_config,
+                          self.domain['id'], config)
+        config = {'ldap': {uuid.uuid4().hex: uuid.uuid4().hex}}
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.create_config,
+                          self.domain['id'], config)
+        # Try an option that IS in the standard conf, but is neither
+        # whitelisted nor marked as sensitive
+        config = {'ldap': {'role_tree_dn': uuid.uuid4().hex}}
+        self.assertRaises(exception.InvalidDomainConfig,
+                          self.domain_config_api.create_config,
+                          self.domain['id'], config)
+
+    def test_delete_invalid_partial_domain_config(self):
+        config = {'ldap': {'url': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+        # Try deleting a group not in the config
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.delete_config,
+                          self.domain['id'], group='identity')
+        # Try deleting an option not in the config
+        self.assertRaises(exception.DomainConfigNotFound,
+                          self.domain_config_api.delete_config,
+                          self.domain['id'],
+                          group='ldap', option='user_tree_dn')
+
+    def test_sensitive_substitution_in_domain_config(self):
+        # Create a config that contains a whitelisted option that requires
+        # substitution of a sensitive option.
+        config = {'ldap': {'url': 'my_url/%(password)s',
+                           'user_tree_dn': uuid.uuid4().hex,
+                           'password': uuid.uuid4().hex},
+                  'identity': {'driver': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+
+        # Read back the config with the internal method and ensure that the
+        # substitution has taken place.
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domain['id'])
+        expected_url = (
+            config['ldap']['url'] % {'password': config['ldap']['password']})
+        self.assertEqual(expected_url, res['ldap']['url'])
+
+    def test_invalid_sensitive_substitution_in_domain_config(self):
+        """Check that invalid substitutions raise warnings."""
+
+        mock_log = mock.Mock()
+
+        invalid_option_config = {
+            'ldap': {'user_tree_dn': uuid.uuid4().hex,
+                     'password': uuid.uuid4().hex},
+            'identity': {'driver': uuid.uuid4().hex}}
+
+        for invalid_option in ['my_url/%(passssword)s',
+                               'my_url/%(password',
+                               'my_url/%(password)',
+                               'my_url/%(password)d']:
+            invalid_option_config['ldap']['url'] = invalid_option
+            self.domain_config_api.create_config(
+                self.domain['id'], invalid_option_config)
+
+            with mock.patch('keystone.resource.core.LOG', mock_log):
+                res = self.domain_config_api.get_config_with_sensitive_info(
+                    self.domain['id'])
+            mock_log.warn.assert_any_call(mock.ANY)
+            self.assertEqual(
+                invalid_option_config['ldap']['url'], res['ldap']['url'])
+
+    def test_escaped_sequence_in_domain_config(self):
+        """Check that escaped '%(' doesn't get interpreted."""
+
+        mock_log = mock.Mock()
+
+        escaped_option_config = {
+            'ldap': {'url': 'my_url/%%(password)s',
+                     'user_tree_dn': uuid.uuid4().hex,
+                     'password': uuid.uuid4().hex},
+            'identity': {'driver': uuid.uuid4().hex}}
+
+        self.domain_config_api.create_config(
+            self.domain['id'], escaped_option_config)
+
+        with mock.patch('keystone.resource.core.LOG', mock_log):
+            res = self.domain_config_api.get_config_with_sensitive_info(
+                self.domain['id'])
+        self.assertFalse(mock_log.warn.called)
+        # The escaping '%' should have been removed
+        self.assertEqual('my_url/%(password)s', res['ldap']['url'])
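The last two tests in DomainConfigTests exercise plain Python %-formatting against a dict of sensitive values, where '%%' escapes a literal '%'. The production substitution logic lives in keystone.resource.core; the sketch below only demonstrates the underlying string behaviour the tests depend on:

sensitive = {'password': 's3cret'}

# Valid substitution: the sensitive value is spliced into the whitelisted URL.
assert 'my_url/%(password)s' % sensitive == 'my_url/s3cret'

# Escaped sequence: '%%(' collapses to a literal '%(' and is not substituted.
assert 'my_url/%%(password)s' % sensitive == 'my_url/%(password)s'

# Malformed specifiers such as 'my_url/%(password' raise ValueError, and an
# unknown key such as '%(passssword)s' raises KeyError; per the tests above,
# the backend catches these and logs a warning rather than failing.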
diff --git a/keystone-moon/keystone/tests/unit/backend/domain_config/test_sql.py b/keystone-moon/keystone/tests/unit/backend/domain_config/test_sql.py
new file mode 100644 (file)
index 0000000..6459ede
--- /dev/null
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from keystone.common import sql
+from keystone.tests.unit.backend import core_sql
+from keystone.tests.unit.backend.domain_config import core
+
+
+class SqlDomainConfigModels(core_sql.BaseBackendSqlModels):
+
+    def test_whitelisted_model(self):
+        cols = (('domain_id', sql.String, 64),
+                ('group', sql.String, 255),
+                ('option', sql.String, 255),
+                ('value', sql.JsonBlob, None))
+        self.assertExpectedSchema('whitelisted_config', cols)
+
+    def test_sensitive_model(self):
+        cols = (('domain_id', sql.String, 64),
+                ('group', sql.String, 255),
+                ('option', sql.String, 255),
+                ('value', sql.JsonBlob, None))
+        self.assertExpectedSchema('sensitive_config', cols)
+
+
+class SqlDomainConfig(core_sql.BaseBackendSqlTests, core.DomainConfigTests):
+    def setUp(self):
+        super(SqlDomainConfig, self).setUp()
+        # core.DomainConfigTests is effectively a mixin class, so make sure we
+        # call its setup
+        core.DomainConfigTests.setUp(self)
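The comment in SqlDomainConfig.setUp points at a common unittest pitfall: a mixin whose setUp does not take part in the cooperative super() chain never runs unless invoked explicitly. A minimal illustration with made-up class names:

import unittest


class FixtureMixin(object):
    def setUp(self):
        # Deliberately does not call super(): a plain, non-cooperative mixin.
        self.fixture_ready = True


class DemoTest(unittest.TestCase, FixtureMixin):
    def setUp(self):
        super(DemoTest, self).setUp()  # reaches TestCase.setUp, which ends the chain
        FixtureMixin.setUp(self)       # so the mixin's setUp is called by hand

    def test_fixture(self):
        self.assertTrue(self.fixture_ready)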
diff --git a/keystone-moon/keystone/tests/unit/backend/role/__init__.py b/keystone-moon/keystone/tests/unit/backend/role/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/unit/backend/role/core.py b/keystone-moon/keystone/tests/unit/backend/role/core.py
new file mode 100644 (file)
index 0000000..f6e47fe
--- /dev/null
@@ -0,0 +1,130 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+
+
+class RoleTests(object):
+
+    def test_get_role_404(self):
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.get_role,
+                          uuid.uuid4().hex)
+
+    def test_create_duplicate_role_name_fails(self):
+        role = {'id': 'fake1',
+                'name': 'fake1name'}
+        self.role_api.create_role('fake1', role)
+        role['id'] = 'fake2'
+        self.assertRaises(exception.Conflict,
+                          self.role_api.create_role,
+                          'fake2',
+                          role)
+
+    def test_rename_duplicate_role_name_fails(self):
+        role1 = {
+            'id': 'fake1',
+            'name': 'fake1name'
+        }
+        role2 = {
+            'id': 'fake2',
+            'name': 'fake2name'
+        }
+        self.role_api.create_role('fake1', role1)
+        self.role_api.create_role('fake2', role2)
+        role1['name'] = 'fake2name'
+        self.assertRaises(exception.Conflict,
+                          self.role_api.update_role,
+                          'fake1',
+                          role1)
+
+    def test_role_crud(self):
+        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role['id'], role)
+        role_ref = self.role_api.get_role(role['id'])
+        role_ref_dict = {x: role_ref[x] for x in role_ref}
+        self.assertDictEqual(role_ref_dict, role)
+
+        role['name'] = uuid.uuid4().hex
+        updated_role_ref = self.role_api.update_role(role['id'], role)
+        role_ref = self.role_api.get_role(role['id'])
+        role_ref_dict = {x: role_ref[x] for x in role_ref}
+        self.assertDictEqual(role_ref_dict, role)
+        self.assertDictEqual(role_ref_dict, updated_role_ref)
+
+        self.role_api.delete_role(role['id'])
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.get_role,
+                          role['id'])
+
+    def test_update_role_404(self):
+        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.update_role,
+                          role['id'],
+                          role)
+
+    def test_list_roles(self):
+        roles = self.role_api.list_roles()
+        self.assertEqual(len(default_fixtures.ROLES), len(roles))
+        role_ids = set(role['id'] for role in roles)
+        expected_role_ids = set(role['id'] for role in default_fixtures.ROLES)
+        self.assertEqual(expected_role_ids, role_ids)
+
+    @tests.skip_if_cache_disabled('role')
+    def test_cache_layer_role_crud(self):
+        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        role_id = role['id']
+        # Create role
+        self.role_api.create_role(role_id, role)
+        role_ref = self.role_api.get_role(role_id)
+        updated_role_ref = copy.deepcopy(role_ref)
+        updated_role_ref['name'] = uuid.uuid4().hex
+        # Update role, bypassing the role api manager
+        self.role_api.driver.update_role(role_id, updated_role_ref)
+        # Verify get_role still returns old ref
+        self.assertDictEqual(role_ref, self.role_api.get_role(role_id))
+        # Invalidate Cache
+        self.role_api.get_role.invalidate(self.role_api, role_id)
+        # Verify get_role returns the new role_ref
+        self.assertDictEqual(updated_role_ref,
+                             self.role_api.get_role(role_id))
+        # Update role back to original via the assignment api manager
+        self.role_api.update_role(role_id, role_ref)
+        # Verify get_role returns the original role ref
+        self.assertDictEqual(role_ref, self.role_api.get_role(role_id))
+        # Delete role bypassing the role api manager
+        self.role_api.driver.delete_role(role_id)
+        # Verify get_role still returns the role_ref
+        self.assertDictEqual(role_ref, self.role_api.get_role(role_id))
+        # Invalidate cache
+        self.role_api.get_role.invalidate(self.role_api, role_id)
+        # Verify RoleNotFound is now raised
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.get_role,
+                          role_id)
+        # recreate role
+        self.role_api.create_role(role_id, role)
+        self.role_api.get_role(role_id)
+        # delete role via the assignment api manager
+        self.role_api.delete_role(role_id)
+        # verify RoleNotFound is now raised
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.get_role,
+                          role_id)
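test_cache_layer_role_crud above encodes a general recipe for testing cache coherency: mutate through the driver (bypassing the caching manager), observe the stale cached value, then invalidate and observe the fresh state. Condensed to its skeleton, with role_api, role_id and role exactly as set up in the test (an outline of the test above, not new API):

# 1. Write through the manager so the cache is populated.
role_api.create_role(role_id, role)
role_api.get_role(role_id)                       # cached now

# 2. Mutate through the driver: the cache does not see this.
role_api.driver.delete_role(role_id)
role_api.get_role(role_id)                       # still returns the stale ref

# 3. Drop the cached entry; the manager hits the driver again.
role_api.get_role.invalidate(role_api, role_id)
# role_api.get_role(role_id) now raises exception.RoleNotFound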
diff --git a/keystone-moon/keystone/tests/unit/backend/role/test_ldap.py b/keystone-moon/keystone/tests/unit/backend/role/test_ldap.py
new file mode 100644 (file)
index 0000000..ba4b7c6
--- /dev/null
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit.backend import core_ldap
+from keystone.tests.unit.backend.role import core as core_role
+from keystone.tests.unit import default_fixtures
+
+
+CONF = cfg.CONF
+
+
+class LdapRoleCommon(core_ldap.BaseBackendLdapCommon, core_role.RoleTests):
+    """Tests that should be run in every LDAP configuration.
+
+    Include additional tests that are unique to LDAP (or need to be overridden)
+    which should be run for all the various LDAP configurations we test.
+
+    """
+    pass
+
+
+class LdapRole(LdapRoleCommon, core_ldap.BaseBackendLdap, tests.TestCase):
+    """Test in an all-LDAP configuration.
+
+    Include additional tests that are unique to LDAP (or need to be overridden)
+    which only need to be run in a basic LDAP configuration.
+
+    """
+    def test_configurable_allowed_role_actions(self):
+        role = {'id': u'fäké1', 'name': u'fäké1'}
+        self.role_api.create_role(u'fäké1', role)
+        role_ref = self.role_api.get_role(u'fäké1')
+        self.assertEqual(u'fäké1', role_ref['id'])
+
+        role['name'] = u'fäké2'
+        self.role_api.update_role(u'fäké1', role)
+
+        self.role_api.delete_role(u'fäké1')
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.get_role,
+                          u'fäké1')
+
+    def test_configurable_forbidden_role_actions(self):
+        self.config_fixture.config(
+            group='ldap', role_allow_create=False, role_allow_update=False,
+            role_allow_delete=False)
+        self.load_backends()
+
+        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.assertRaises(exception.ForbiddenAction,
+                          self.role_api.create_role,
+                          role['id'],
+                          role)
+
+        self.role_member['name'] = uuid.uuid4().hex
+        self.assertRaises(exception.ForbiddenAction,
+                          self.role_api.update_role,
+                          self.role_member['id'],
+                          self.role_member)
+
+        self.assertRaises(exception.ForbiddenAction,
+                          self.role_api.delete_role,
+                          self.role_member['id'])
+
+    def test_role_filter(self):
+        role_ref = self.role_api.get_role(self.role_member['id'])
+        self.assertDictEqual(role_ref, self.role_member)
+
+        self.config_fixture.config(group='ldap',
+                                   role_filter='(CN=DOES_NOT_MATCH)')
+        self.load_backends()
+        # NOTE(morganfainberg): CONF.ldap.role_filter will not be
+        # dynamically changed at runtime. This invalidate is a work-around for
+        # the expectation that it is safe to change config values in tests that
+        # could affect what the drivers would return up to the manager. It
+        # preserves that assumption when working with aggressive (on-create)
+        # cache population.
+        self.role_api.get_role.invalidate(self.role_api,
+                                          self.role_member['id'])
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.get_role,
+                          self.role_member['id'])
+
+    def test_role_attribute_mapping(self):
+        self.config_fixture.config(group='ldap', role_name_attribute='ou')
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        # NOTE(morganfainberg): CONF.ldap.role_name_attribute will not be
+        # dynamically changed at runtime. This invalidate is a work-around for
+        # the expectation that it is safe to change config values in tests that
+        # could affect what the drivers would return up to the manager. It
+        # preserves that assumption when working with aggressive (on-create)
+        # cache population.
+        self.role_api.get_role.invalidate(self.role_api,
+                                          self.role_member['id'])
+        role_ref = self.role_api.get_role(self.role_member['id'])
+        self.assertEqual(self.role_member['id'], role_ref['id'])
+        self.assertEqual(self.role_member['name'], role_ref['name'])
+
+        self.config_fixture.config(group='ldap', role_name_attribute='sn')
+        self.load_backends()
+        # NOTE(morganfainberg): CONF.ldap.role_name_attribute will not be
+        # dynamically changed at runtime. This invalidate is a work-around for
+        # the expectation that it is safe to change config values in tests that
+        # could affect what the drivers would return up to the manager. It
+        # preserves that assumption when working with aggressive (on-create)
+        # cache population.
+        self.role_api.get_role.invalidate(self.role_api,
+                                          self.role_member['id'])
+        role_ref = self.role_api.get_role(self.role_member['id'])
+        self.assertEqual(self.role_member['id'], role_ref['id'])
+        self.assertNotIn('name', role_ref)
+
+    def test_role_attribute_ignore(self):
+        self.config_fixture.config(group='ldap',
+                                   role_attribute_ignore=['name'])
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        # NOTE(morganfainberg): CONF.ldap.role_attribute_ignore will not be
+        # dynamically changed at runtime. This invalidate is a work-around for
+        # the expectation that it is safe to change config values in tests that
+        # could affect what the drivers would return up to the manager. It
+        # preserves that assumption when working with aggressive (on-create)
+        # cache population.
+        self.role_api.get_role.invalidate(self.role_api,
+                                          self.role_member['id'])
+        role_ref = self.role_api.get_role(self.role_member['id'])
+        self.assertEqual(self.role_member['id'], role_ref['id'])
+        self.assertNotIn('name', role_ref)
+
+
+class LdapIdentitySqlEverythingElseRole(
+    core_ldap.BaseBackendLdapIdentitySqlEverythingElse, LdapRoleCommon,
+        tests.TestCase):
+    """Test Identity in LDAP, Everything else in SQL."""
+    pass
+
+
+class LdapIdentitySqlEverythingElseWithMappingRole(
+    LdapIdentitySqlEverythingElseRole,
+        core_ldap.BaseBackendLdapIdentitySqlEverythingElseWithMapping):
+    """Test ID mapping of default LDAP backend."""
+    pass
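
test_configurable_forbidden_role_actions above toggles the same read-only
switches a deployer would set statically; the equivalent keystone.conf
fragment (option names taken directly from the test) would be:

    [ldap]
    role_allow_create = false
    role_allow_update = false
    role_allow_delete = false
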
diff --git a/keystone-moon/keystone/tests/unit/backend/role/test_sql.py b/keystone-moon/keystone/tests/unit/backend/role/test_sql.py
new file mode 100644 (file)
index 0000000..79ff148
--- /dev/null
@@ -0,0 +1,40 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystone.common import sql
+from keystone import exception
+from keystone.tests.unit.backend import core_sql
+from keystone.tests.unit.backend.role import core
+
+
+class SqlRoleModels(core_sql.BaseBackendSqlModels):
+
+    def test_role_model(self):
+        cols = (('id', sql.String, 64),
+                ('name', sql.String, 255))
+        self.assertExpectedSchema('role', cols)
+
+
+class SqlRole(core_sql.BaseBackendSqlTests, core.RoleTests):
+
+    def test_create_null_role_name(self):
+        role = {'id': uuid.uuid4().hex,
+                'name': None}
+        self.assertRaises(exception.UnexpectedError,
+                          self.role_api.create_role,
+                          role['id'],
+                          role)
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.get_role,
+                          role['id'])
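
SqlRoleModels pins the role table to an id String(64) and a name String(255).
A self-contained SQLAlchemy sketch of a model that would satisfy those
assertions (nullable=False is consistent with test_create_null_role_name;
anything beyond the two asserted columns is an assumption, not keystone's
actual model):

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Role(Base):
        __tablename__ = 'role'
        # The two columns asserted by test_role_model.
        id = sa.Column(sa.String(64), primary_key=True)
        # A NULL name must be rejected, per test_create_null_role_name.
        name = sa.Column(sa.String(255), nullable=False)
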
diff --git a/keystone-moon/keystone/tests/unit/catalog/__init__.py b/keystone-moon/keystone/tests/unit/catalog/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/unit/catalog/test_core.py b/keystone-moon/keystone/tests/unit/catalog/test_core.py
new file mode 100644 (file)
index 0000000..99a3428
--- /dev/null
@@ -0,0 +1,74 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import testtools
+
+from keystone.catalog import core
+from keystone import exception
+
+
+CONF = cfg.CONF
+
+
+class FormatUrlTests(testtools.TestCase):
+
+    def test_successful_formatting(self):
+        url_template = ('http://$(public_bind_host)s:$(admin_port)d/'
+                        '$(tenant_id)s/$(user_id)s')
+        values = {'public_bind_host': 'server', 'admin_port': 9090,
+                  'tenant_id': 'A', 'user_id': 'B'}
+        actual_url = core.format_url(url_template, values)
+
+        expected_url = 'http://server:9090/A/B'
+        self.assertEqual(actual_url, expected_url)
+
+    def test_raises_malformed_on_missing_key(self):
+        self.assertRaises(exception.MalformedEndpoint,
+                          core.format_url,
+                          "http://$(public_bind_host)s/$(public_port)d",
+                          {"public_bind_host": "1"})
+
+    def test_raises_malformed_on_wrong_type(self):
+        self.assertRaises(exception.MalformedEndpoint,
+                          core.format_url,
+                          "http://$(public_bind_host)d",
+                          {"public_bind_host": "something"})
+
+    def test_raises_malformed_on_incomplete_format(self):
+        self.assertRaises(exception.MalformedEndpoint,
+                          core.format_url,
+                          "http://$(public_bind_host)",
+                          {"public_bind_host": "1"})
+
+    def test_formatting_a_non_string(self):
+        def _test(url_template):
+            self.assertRaises(exception.MalformedEndpoint,
+                              core.format_url,
+                              url_template,
+                              {})
+
+        _test(None)
+        _test(object())
+
+    def test_substitution_with_key_not_allowed(self):
+        # If the url template contains a substitution that's not in the allowed
+        # list then MalformedEndpoint is raised.
+        # For example, admin_token isn't allowed.
+        url_template = ('http://$(public_bind_host)s:$(public_port)d/'
+                        '$(tenant_id)s/$(user_id)s/$(admin_token)s')
+        values = {'public_bind_host': 'server', 'public_port': 9090,
+                  'tenant_id': 'A', 'user_id': 'B', 'admin_token': 'C'}
+        self.assertRaises(exception.MalformedEndpoint,
+                          core.format_url,
+                          url_template,
+                          values)
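
The FormatUrlTests above fully characterize format_url's contract: $(key)s
placeholders are substituted, and non-string templates, disallowed keys,
missing keys, wrong value types, and incomplete formats all raise
MalformedEndpoint. A behavioral sketch that satisfies exactly these cases (a
hypothetical re-implementation; the whitelist below is an illustrative subset
and the real function also logs the failure reason):

    import re

    class MalformedEndpoint(Exception):
        pass

    # Illustrative subset of the allowed substitution keys.
    ALLOWED_KEYS = ('tenant_id', 'user_id', 'public_bind_host',
                    'admin_port', 'public_port')

    def format_url(url, values):
        if not isinstance(url, str):
            raise MalformedEndpoint(url)      # e.g. None or object()
        for key in re.findall(r'\$\((\w+)\)', url):
            if key not in ALLOWED_KEYS:
                raise MalformedEndpoint(url)  # e.g. admin_token is rejected
        try:
            # $(key)s templates are %-formatting in disguise.
            return url.replace('$(', '%(') % values
        except (KeyError, TypeError, ValueError):
            # missing key, wrong type for %d, or an incomplete format
            raise MalformedEndpoint(url)
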
diff --git a/keystone-moon/keystone/tests/unit/common/__init__.py b/keystone-moon/keystone/tests/unit/common/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/unit/common/test_base64utils.py b/keystone-moon/keystone/tests/unit/common/test_base64utils.py
new file mode 100644 (file)
index 0000000..b0b7557
--- /dev/null
@@ -0,0 +1,208 @@
+# Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from keystone.common import base64utils
+from keystone.tests import unit as tests
+
+base64_alphabet = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                   'abcdefghijklmnopqrstuvwxyz'
+                   '0123456789'
+                   '+/=')       # includes pad char
+
+base64url_alphabet = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                      'abcdefghijklmnopqrstuvwxyz'
+                      '0123456789'
+                      '-_=')    # includes pad char
+
+
+class TestValid(tests.BaseTestCase):
+    def test_valid_base64(self):
+        self.assertTrue(base64utils.is_valid_base64('+/=='))
+        self.assertTrue(base64utils.is_valid_base64('+/+='))
+        self.assertTrue(base64utils.is_valid_base64('+/+/'))
+
+        self.assertFalse(base64utils.is_valid_base64('-_=='))
+        self.assertFalse(base64utils.is_valid_base64('-_-='))
+        self.assertFalse(base64utils.is_valid_base64('-_-_'))
+
+        self.assertTrue(base64utils.is_valid_base64('abcd'))
+        self.assertFalse(base64utils.is_valid_base64('abcde'))
+        self.assertFalse(base64utils.is_valid_base64('abcde=='))
+        self.assertFalse(base64utils.is_valid_base64('abcdef'))
+        self.assertTrue(base64utils.is_valid_base64('abcdef=='))
+        self.assertFalse(base64utils.is_valid_base64('abcdefg'))
+        self.assertTrue(base64utils.is_valid_base64('abcdefg='))
+        self.assertTrue(base64utils.is_valid_base64('abcdefgh'))
+
+        self.assertFalse(base64utils.is_valid_base64('-_=='))
+
+    def test_valid_base64url(self):
+        self.assertFalse(base64utils.is_valid_base64url('+/=='))
+        self.assertFalse(base64utils.is_valid_base64url('+/+='))
+        self.assertFalse(base64utils.is_valid_base64url('+/+/'))
+
+        self.assertTrue(base64utils.is_valid_base64url('-_=='))
+        self.assertTrue(base64utils.is_valid_base64url('-_-='))
+        self.assertTrue(base64utils.is_valid_base64url('-_-_'))
+
+        self.assertTrue(base64utils.is_valid_base64url('abcd'))
+        self.assertFalse(base64utils.is_valid_base64url('abcde'))
+        self.assertFalse(base64utils.is_valid_base64url('abcde=='))
+        self.assertFalse(base64utils.is_valid_base64url('abcdef'))
+        self.assertTrue(base64utils.is_valid_base64url('abcdef=='))
+        self.assertFalse(base64utils.is_valid_base64url('abcdefg'))
+        self.assertTrue(base64utils.is_valid_base64url('abcdefg='))
+        self.assertTrue(base64utils.is_valid_base64url('abcdefgh'))
+
+        self.assertTrue(base64utils.is_valid_base64url('-_=='))
+
+
+class TestBase64Padding(tests.BaseTestCase):
+
+    def test_filter(self):
+        self.assertEqual('', base64utils.filter_formatting(''))
+        self.assertEqual('', base64utils.filter_formatting(' '))
+        self.assertEqual('a', base64utils.filter_formatting('a'))
+        self.assertEqual('a', base64utils.filter_formatting(' a'))
+        self.assertEqual('a', base64utils.filter_formatting('a '))
+        self.assertEqual('ab', base64utils.filter_formatting('ab'))
+        self.assertEqual('ab', base64utils.filter_formatting(' ab'))
+        self.assertEqual('ab', base64utils.filter_formatting('ab '))
+        self.assertEqual('ab', base64utils.filter_formatting('a b'))
+        self.assertEqual('ab', base64utils.filter_formatting(' a b'))
+        self.assertEqual('ab', base64utils.filter_formatting('a b '))
+        self.assertEqual('ab', base64utils.filter_formatting('a\nb\n '))
+
+        text = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                'abcdefghijklmnopqrstuvwxyz'
+                '0123456789'
+                '+/=')
+        self.assertEqual(base64_alphabet,
+                         base64utils.filter_formatting(text))
+
+        text = (' ABCDEFGHIJKLMNOPQRSTUVWXYZ\n'
+                ' abcdefghijklmnopqrstuvwxyz\n'
+                '\t\f\r'
+                ' 0123456789\n'
+                ' +/=')
+        self.assertEqual(base64_alphabet,
+                         base64utils.filter_formatting(text))
+        self.assertEqual(base64url_alphabet,
+                         base64utils.base64_to_base64url(base64_alphabet))
+
+        text = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                'abcdefghijklmnopqrstuvwxyz'
+                '0123456789'
+                '-_=')
+        self.assertEqual(base64url_alphabet,
+                         base64utils.filter_formatting(text))
+
+        text = (' ABCDEFGHIJKLMNOPQRSTUVWXYZ\n'
+                ' abcdefghijklmnopqrstuvwxyz\n'
+                '\t\f\r'
+                ' 0123456789\n'
+                '-_=')
+        self.assertEqual(base64url_alphabet,
+                         base64utils.filter_formatting(text))
+
+    def test_alphabet_conversion(self):
+        self.assertEqual(base64url_alphabet,
+                         base64utils.base64_to_base64url(base64_alphabet))
+
+        self.assertEqual(base64_alphabet,
+                         base64utils.base64url_to_base64(base64url_alphabet))
+
+    def test_is_padded(self):
+        self.assertTrue(base64utils.base64_is_padded('ABCD'))
+        self.assertTrue(base64utils.base64_is_padded('ABC='))
+        self.assertTrue(base64utils.base64_is_padded('AB=='))
+
+        self.assertTrue(base64utils.base64_is_padded('1234ABCD'))
+        self.assertTrue(base64utils.base64_is_padded('1234ABC='))
+        self.assertTrue(base64utils.base64_is_padded('1234AB=='))
+
+        self.assertFalse(base64utils.base64_is_padded('ABC'))
+        self.assertFalse(base64utils.base64_is_padded('AB'))
+        self.assertFalse(base64utils.base64_is_padded('A'))
+        self.assertFalse(base64utils.base64_is_padded(''))
+
+        self.assertRaises(base64utils.InvalidBase64Error,
+                          base64utils.base64_is_padded, '=')
+
+        self.assertRaises(base64utils.InvalidBase64Error,
+                          base64utils.base64_is_padded, 'AB=C')
+
+        self.assertRaises(base64utils.InvalidBase64Error,
+                          base64utils.base64_is_padded, 'AB=')
+
+        self.assertRaises(base64utils.InvalidBase64Error,
+                          base64utils.base64_is_padded, 'ABCD=')
+
+        self.assertRaises(ValueError, base64utils.base64_is_padded,
+                          'ABC', pad='==')
+        self.assertRaises(base64utils.InvalidBase64Error,
+                          base64utils.base64_is_padded, 'A=BC')
+
+    def test_strip_padding(self):
+        self.assertEqual('ABCD', base64utils.base64_strip_padding('ABCD'))
+        self.assertEqual('ABC', base64utils.base64_strip_padding('ABC='))
+        self.assertEqual('AB', base64utils.base64_strip_padding('AB=='))
+        self.assertRaises(ValueError, base64utils.base64_strip_padding,
+                          'ABC=', pad='==')
+        self.assertEqual('ABC', base64utils.base64_strip_padding('ABC'))
+
+    def test_assure_padding(self):
+        self.assertEqual('ABCD', base64utils.base64_assure_padding('ABCD'))
+        self.assertEqual('ABC=', base64utils.base64_assure_padding('ABC'))
+        self.assertEqual('ABC=', base64utils.base64_assure_padding('ABC='))
+        self.assertEqual('AB==', base64utils.base64_assure_padding('AB'))
+        self.assertEqual('AB==', base64utils.base64_assure_padding('AB=='))
+        self.assertRaises(ValueError, base64utils.base64_assure_padding,
+                          'ABC', pad='==')
+
+    def test_base64_percent_encoding(self):
+        self.assertEqual('ABCD', base64utils.base64url_percent_encode('ABCD'))
+        self.assertEqual('ABC%3D',
+                         base64utils.base64url_percent_encode('ABC='))
+        self.assertEqual('AB%3D%3D',
+                         base64utils.base64url_percent_encode('AB=='))
+
+        self.assertEqual('ABCD', base64utils.base64url_percent_decode('ABCD'))
+        self.assertEqual('ABC=',
+                         base64utils.base64url_percent_decode('ABC%3D'))
+        self.assertEqual('AB==',
+                         base64utils.base64url_percent_decode('AB%3D%3D'))
+        self.assertRaises(base64utils.InvalidBase64Error,
+                          base64utils.base64url_percent_encode, 'chars')
+        self.assertRaises(base64utils.InvalidBase64Error,
+                          base64utils.base64url_percent_decode, 'AB%3D%3')
+
+
+class TestTextWrap(tests.BaseTestCase):
+
+    def test_wrapping(self):
+        raw_text = 'abcdefgh'
+        wrapped_text = 'abc\ndef\ngh\n'
+
+        self.assertEqual(wrapped_text,
+                         base64utils.base64_wrap(raw_text, width=3))
+
+        t = '\n'.join(base64utils.base64_wrap_iter(raw_text, width=3)) + '\n'
+        self.assertEqual(wrapped_text, t)
+
+        raw_text = 'abcdefgh'
+        wrapped_text = 'abcd\nefgh\n'
+
+        self.assertEqual(wrapped_text,
+                         base64utils.base64_wrap(raw_text, width=4))
diff --git a/keystone-moon/keystone/tests/unit/common/test_connection_pool.py b/keystone-moon/keystone/tests/unit/common/test_connection_pool.py
new file mode 100644 (file)
index 0000000..74d0420
--- /dev/null
@@ -0,0 +1,119 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+import mock
+from six.moves import queue
+import testtools
+from testtools import matchers
+
+from keystone.common.cache import _memcache_pool
+from keystone import exception
+from keystone.tests.unit import core
+
+
+class _TestConnectionPool(_memcache_pool.ConnectionPool):
+    destroyed_value = 'destroyed'
+
+    def _create_connection(self):
+        return mock.MagicMock()
+
+    def _destroy_connection(self, conn):
+        conn(self.destroyed_value)
+
+
+class TestConnectionPool(core.TestCase):
+    def setUp(self):
+        super(TestConnectionPool, self).setUp()
+        self.unused_timeout = 10
+        self.maxsize = 2
+        self.connection_pool = _TestConnectionPool(
+            maxsize=self.maxsize,
+            unused_timeout=self.unused_timeout)
+        self.addCleanup(self.cleanup_instance('connection_pool'))
+
+    def test_get_context_manager(self):
+        self.assertThat(self.connection_pool.queue, matchers.HasLength(0))
+        with self.connection_pool.acquire() as conn:
+            self.assertEqual(1, self.connection_pool._acquired)
+        self.assertEqual(0, self.connection_pool._acquired)
+        self.assertThat(self.connection_pool.queue, matchers.HasLength(1))
+        self.assertEqual(conn, self.connection_pool.queue[0].connection)
+
+    def test_cleanup_pool(self):
+        self.test_get_context_manager()
+        newtime = time.time() + self.unused_timeout * 2
+        non_expired_connection = _memcache_pool._PoolItem(
+            ttl=(newtime * 2),
+            connection=mock.MagicMock())
+        self.connection_pool.queue.append(non_expired_connection)
+        self.assertThat(self.connection_pool.queue, matchers.HasLength(2))
+        with mock.patch.object(time, 'time', return_value=newtime):
+            conn = self.connection_pool.queue[0].connection
+            with self.connection_pool.acquire():
+                pass
+            conn.assert_has_calls(
+                [mock.call(self.connection_pool.destroyed_value)])
+        self.assertThat(self.connection_pool.queue, matchers.HasLength(1))
+        self.assertEqual(0, non_expired_connection.connection.call_count)
+
+    def test_acquire_conn_exception_returns_acquired_count(self):
+        class TestException(Exception):
+            pass
+
+        with mock.patch.object(_TestConnectionPool, '_create_connection',
+                               side_effect=TestException):
+            with testtools.ExpectedException(TestException):
+                with self.connection_pool.acquire():
+                    pass
+            self.assertThat(self.connection_pool.queue,
+                            matchers.HasLength(0))
+            self.assertEqual(0, self.connection_pool._acquired)
+
+    def test_connection_pool_limits_maximum_connections(self):
+        # NOTE(morganfainberg): To ensure we don't lock up the tests until the
+        # job time limit, explicitly call .get_nowait() and .put_nowait() in
+        # this case.
+        conn1 = self.connection_pool.get_nowait()
+        conn2 = self.connection_pool.get_nowait()
+
+        # Use a nowait version to raise an Empty exception indicating we would
+        # not get another connection until one is placed back into the queue.
+        self.assertRaises(queue.Empty, self.connection_pool.get_nowait)
+
+        # Place the connections back into the pool.
+        self.connection_pool.put_nowait(conn1)
+        self.connection_pool.put_nowait(conn2)
+
+        # Make sure we can get a connection out of the pool again.
+        self.connection_pool.get_nowait()
+
+    def test_connection_pool_maximum_connection_get_timeout(self):
+        connection_pool = _TestConnectionPool(
+            maxsize=1,
+            unused_timeout=self.unused_timeout,
+            conn_get_timeout=0)
+
+        def _acquire_connection():
+            with connection_pool.acquire():
+                pass
+
+        # Make sure we've consumed the only available connection from the pool
+        conn = connection_pool.get_nowait()
+
+        self.assertRaises(exception.UnexpectedError, _acquire_connection)
+
+        # Put the connection back and ensure we can acquire the connection
+        # after it is available.
+        connection_pool.put_nowait(conn)
+        _acquire_connection()
diff --git a/keystone-moon/keystone/tests/unit/common/test_injection.py b/keystone-moon/keystone/tests/unit/common/test_injection.py
new file mode 100644 (file)
index 0000000..86bb3c2
--- /dev/null
@@ -0,0 +1,293 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystone.common import dependency
+from keystone.tests import unit as tests
+
+
+class TestDependencyInjection(tests.BaseTestCase):
+    def setUp(self):
+        super(TestDependencyInjection, self).setUp()
+        self.addCleanup(dependency.reset)
+
+    def test_dependency_injection(self):
+        class Interface(object):
+            def do_work(self):
+                assert False
+
+        @dependency.provider('first_api')
+        class FirstImplementation(Interface):
+            def do_work(self):
+                return True
+
+        @dependency.provider('second_api')
+        class SecondImplementation(Interface):
+            def do_work(self):
+                return True
+
+        @dependency.requires('first_api', 'second_api')
+        class Consumer(object):
+            def do_work_with_dependencies(self):
+                assert self.first_api.do_work()
+                assert self.second_api.do_work()
+
+        # initialize dependency providers
+        first_api = FirstImplementation()
+        second_api = SecondImplementation()
+
+        # ... sometime later, initialize a dependency consumer
+        consumer = Consumer()
+
+        # the expected dependencies should be available to the consumer
+        self.assertIs(consumer.first_api, first_api)
+        self.assertIs(consumer.second_api, second_api)
+        self.assertIsInstance(consumer.first_api, Interface)
+        self.assertIsInstance(consumer.second_api, Interface)
+        consumer.do_work_with_dependencies()
+
+    def test_dependency_provider_configuration(self):
+        @dependency.provider('api')
+        class Configurable(object):
+            def __init__(self, value=None):
+                self.value = value
+
+            def get_value(self):
+                return self.value
+
+        @dependency.requires('api')
+        class Consumer(object):
+            def get_value(self):
+                return self.api.get_value()
+
+        # initialize dependency providers
+        api = Configurable(value=True)
+
+        # ... sometime later, initialize a dependency consumer
+        consumer = Consumer()
+
+        # the expected dependencies should be available to the consumer
+        self.assertIs(consumer.api, api)
+        self.assertIsInstance(consumer.api, Configurable)
+        self.assertTrue(consumer.get_value())
+
+    def test_dependency_consumer_configuration(self):
+        @dependency.provider('api')
+        class Provider(object):
+            def get_value(self):
+                return True
+
+        @dependency.requires('api')
+        class Configurable(object):
+            def __init__(self, value=None):
+                self.value = value
+
+            def get_value(self):
+                if self.value:
+                    return self.api.get_value()
+
+        # initialize dependency providers
+        api = Provider()
+
+        # ... sometime later, initialize a dependency consumer
+        consumer = Configurable(value=True)
+
+        # the expected dependencies should be available to the consumer
+        self.assertIs(consumer.api, api)
+        self.assertIsInstance(consumer.api, Provider)
+        self.assertTrue(consumer.get_value())
+
+    def test_inherited_dependency(self):
+        class Interface(object):
+            def do_work(self):
+                assert False
+
+        @dependency.provider('first_api')
+        class FirstImplementation(Interface):
+            def do_work(self):
+                return True
+
+        @dependency.provider('second_api')
+        class SecondImplementation(Interface):
+            def do_work(self):
+                return True
+
+        @dependency.requires('first_api')
+        class ParentConsumer(object):
+            def do_work_with_dependencies(self):
+                assert self.first_api.do_work()
+
+        @dependency.requires('second_api')
+        class ChildConsumer(ParentConsumer):
+            def do_work_with_dependencies(self):
+                assert self.second_api.do_work()
+                super(ChildConsumer, self).do_work_with_dependencies()
+
+        # initialize dependency providers
+        first_api = FirstImplementation()
+        second_api = SecondImplementation()
+
+        # ... sometime later, initialize a dependency consumer
+        consumer = ChildConsumer()
+
+        # dependencies should be naturally inherited
+        self.assertEqual(
+            set(['first_api']),
+            ParentConsumer._dependencies)
+        self.assertEqual(
+            set(['first_api', 'second_api']),
+            ChildConsumer._dependencies)
+        self.assertEqual(
+            set(['first_api', 'second_api']),
+            consumer._dependencies)
+
+        # the expected dependencies should be available to the consumer
+        self.assertIs(consumer.first_api, first_api)
+        self.assertIs(consumer.second_api, second_api)
+        self.assertIsInstance(consumer.first_api, Interface)
+        self.assertIsInstance(consumer.second_api, Interface)
+        consumer.do_work_with_dependencies()
+
+    def test_unresolvable_dependency(self):
+        @dependency.requires(uuid.uuid4().hex)
+        class Consumer(object):
+            pass
+
+        def for_test():
+            Consumer()
+            dependency.resolve_future_dependencies()
+
+        self.assertRaises(dependency.UnresolvableDependencyException, for_test)
+
+    def test_circular_dependency(self):
+        p1_name = uuid.uuid4().hex
+        p2_name = uuid.uuid4().hex
+
+        @dependency.provider(p1_name)
+        @dependency.requires(p2_name)
+        class P1(object):
+            pass
+
+        @dependency.provider(p2_name)
+        @dependency.requires(p1_name)
+        class P2(object):
+            pass
+
+        p1 = P1()
+        p2 = P2()
+
+        dependency.resolve_future_dependencies()
+
+        self.assertIs(getattr(p1, p2_name), p2)
+        self.assertIs(getattr(p2, p1_name), p1)
+
+    def test_reset(self):
+        # Can reset the registry of providers.
+
+        p_id = uuid.uuid4().hex
+
+        @dependency.provider(p_id)
+        class P(object):
+            pass
+
+        p_inst = P()
+
+        self.assertIs(dependency.get_provider(p_id), p_inst)
+
+        dependency.reset()
+
+        self.assertFalse(dependency._REGISTRY)
+
+    def test_optional_dependency_not_provided(self):
+        requirement_name = uuid.uuid4().hex
+
+        @dependency.optional(requirement_name)
+        class C1(object):
+            pass
+
+        c1_inst = C1()
+
+        dependency.resolve_future_dependencies()
+
+        self.assertIsNone(getattr(c1_inst, requirement_name))
+
+    def test_optional_dependency_provided(self):
+        requirement_name = uuid.uuid4().hex
+
+        @dependency.optional(requirement_name)
+        class C1(object):
+            pass
+
+        @dependency.provider(requirement_name)
+        class P1(object):
+            pass
+
+        c1_inst = C1()
+        p1_inst = P1()
+
+        dependency.resolve_future_dependencies()
+
+        self.assertIs(getattr(c1_inst, requirement_name), p1_inst)
+
+    def test_optional_and_required(self):
+        p1_name = uuid.uuid4().hex
+        p2_name = uuid.uuid4().hex
+        optional_name = uuid.uuid4().hex
+
+        @dependency.provider(p1_name)
+        @dependency.requires(p2_name)
+        @dependency.optional(optional_name)
+        class P1(object):
+            pass
+
+        @dependency.provider(p2_name)
+        @dependency.requires(p1_name)
+        class P2(object):
+            pass
+
+        p1 = P1()
+        p2 = P2()
+
+        dependency.resolve_future_dependencies()
+
+        self.assertIs(getattr(p1, p2_name), p2)
+        self.assertIs(getattr(p2, p1_name), p1)
+        self.assertIsNone(getattr(p1, optional_name))
+
+    def test_get_provider(self):
+        # Can get the instance of a provider using get_provider
+
+        provider_name = uuid.uuid4().hex
+
+        @dependency.provider(provider_name)
+        class P(object):
+            pass
+
+        provider_instance = P()
+        retrieved_provider_instance = dependency.get_provider(provider_name)
+        self.assertIs(provider_instance, retrieved_provider_instance)
+
+    def test_get_provider_not_provided_error(self):
+        # If no provider is registered and one is required, KeyError is raised.
+
+        provider_name = uuid.uuid4().hex
+        self.assertRaises(KeyError, dependency.get_provider, provider_name)
+
+    def test_get_provider_not_provided_optional(self):
+        # If no provider is registered and it's optional, None is returned.
+
+        provider_name = uuid.uuid4().hex
+        self.assertIsNone(dependency.get_provider(provider_name,
+                                                  dependency.GET_OPTIONAL))
diff --git a/keystone-moon/keystone/tests/unit/common/test_json_home.py b/keystone-moon/keystone/tests/unit/common/test_json_home.py
new file mode 100644 (file)
index 0000000..fb7f844
--- /dev/null
@@ -0,0 +1,91 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import copy
+
+from testtools import matchers
+
+from keystone.common import json_home
+from keystone.tests import unit as tests
+
+
+class JsonHomeTest(tests.BaseTestCase):
+    def test_build_v3_resource_relation(self):
+        resource_name = self.getUniqueString()
+        relation = json_home.build_v3_resource_relation(resource_name)
+        exp_relation = (
+            'http://docs.openstack.org/api/openstack-identity/3/rel/%s' %
+            resource_name)
+        self.assertThat(relation, matchers.Equals(exp_relation))
+
+    def test_build_v3_extension_resource_relation(self):
+        extension_name = self.getUniqueString()
+        extension_version = self.getUniqueString()
+        resource_name = self.getUniqueString()
+        relation = json_home.build_v3_extension_resource_relation(
+            extension_name, extension_version, resource_name)
+        exp_relation = (
+            'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel/'
+            '%s' % (extension_name, extension_version, resource_name))
+        self.assertThat(relation, matchers.Equals(exp_relation))
+
+    def test_build_v3_parameter_relation(self):
+        parameter_name = self.getUniqueString()
+        relation = json_home.build_v3_parameter_relation(parameter_name)
+        exp_relation = (
+            'http://docs.openstack.org/api/openstack-identity/3/param/%s' %
+            parameter_name)
+        self.assertThat(relation, matchers.Equals(exp_relation))
+
+    def test_build_v3_extension_parameter_relation(self):
+        extension_name = self.getUniqueString()
+        extension_version = self.getUniqueString()
+        parameter_name = self.getUniqueString()
+        relation = json_home.build_v3_extension_parameter_relation(
+            extension_name, extension_version, parameter_name)
+        exp_relation = (
+            'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/'
+            'param/%s' % (extension_name, extension_version, parameter_name))
+        self.assertThat(relation, matchers.Equals(exp_relation))
+
+    def test_translate_urls(self):
+        href_rel = self.getUniqueString()
+        href = self.getUniqueString()
+        href_template_rel = self.getUniqueString()
+        href_template = self.getUniqueString()
+        href_vars = {self.getUniqueString(): self.getUniqueString()}
+        original_json_home = {
+            'resources': {
+                href_rel: {'href': href},
+                href_template_rel: {
+                    'href-template': href_template,
+                    'href-vars': href_vars}
+            }
+        }
+
+        new_json_home = copy.deepcopy(original_json_home)
+        new_prefix = self.getUniqueString()
+        json_home.translate_urls(new_json_home, new_prefix)
+
+        exp_json_home = {
+            'resources': {
+                href_rel: {'href': new_prefix + href},
+                href_template_rel: {
+                    'href-template': new_prefix + href_template,
+                    'href-vars': href_vars}
+            }
+        }
+
+        self.assertThat(new_json_home, matchers.Equals(exp_json_home))
diff --git a/keystone-moon/keystone/tests/unit/common/test_ldap.py b/keystone-moon/keystone/tests/unit/common/test_ldap.py
new file mode 100644 (file)
index 0000000..4156889
--- /dev/null
@@ -0,0 +1,502 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import tempfile
+import uuid
+
+import ldap.dn
+import mock
+from oslo_config import cfg
+from testtools import matchers
+
+from keystone.common import ldap as ks_ldap
+from keystone.common.ldap import core as common_ldap_core
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit import fakeldap
+
+CONF = cfg.CONF
+
+
+class DnCompareTest(tests.BaseTestCase):
+    """Tests for the DN comparison functions in keystone.common.ldap.core."""
+
+    def test_prep(self):
+        # prep_case_insensitive returns the string unchanged if it's already
+        # lowercase and contains no insignificant spaces.
+        value = 'lowercase value'
+        self.assertEqual(value, ks_ldap.prep_case_insensitive(value))
+
+    def test_prep_lowercase(self):
+        # prep_case_insensitive lowercases the value.
+        value = 'UPPERCASE VALUE'
+        exp_value = value.lower()
+        self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value))
+
+    def test_prep_insignificant(self):
+        # prep_case_insensitive removes insignificant spaces.
+        value = 'before   after'
+        exp_value = 'before after'
+        self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value))
+
+    def test_prep_insignificant_pre_post(self):
+        # prep_case_insensitive removes insignificant spaces.
+        value = '   value   '
+        exp_value = 'value'
+        self.assertEqual(exp_value, ks_ldap.prep_case_insensitive(value))
+
+    def test_ava_equal_same(self):
+        # is_ava_value_equal returns True if the two values are the same.
+        value = 'val1'
+        self.assertTrue(ks_ldap.is_ava_value_equal('cn', value, value))
+
+    def test_ava_equal_complex(self):
+        # is_ava_value_equal returns True if the two values are equivalent,
+        # using values that differ in capitalization and insignificant spaces.
+        val1 = 'before   after'
+        val2 = '  BEFORE  afTer '
+        self.assertTrue(ks_ldap.is_ava_value_equal('cn', val1, val2))
+
+    def test_ava_different(self):
+        # is_ava_value_equal returns False if the values aren't the same.
+        self.assertFalse(ks_ldap.is_ava_value_equal('cn', 'val1', 'val2'))
+
+    def test_rdn_same(self):
+        # is_rdn_equal returns True if the two values are the same.
+        rdn = ldap.dn.str2dn('cn=val1')[0]
+        self.assertTrue(ks_ldap.is_rdn_equal(rdn, rdn))
+
+    def test_rdn_diff_length(self):
+        # is_rdn_equal returns False if the RDNs have a different number of
+        # AVAs.
+        rdn1 = ldap.dn.str2dn('cn=cn1')[0]
+        rdn2 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0]
+        self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2))
+
+    def test_rdn_multi_ava_same_order(self):
+        # is_rdn_equal returns True if the RDNs have the same number of AVAs
+        # and the values are the same.
+        rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0]
+        rdn2 = ldap.dn.str2dn('cn=CN1+ou=OU1')[0]
+        self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2))
+
+    def test_rdn_multi_ava_diff_order(self):
+        # is_rdn_equal returns True if the RDNs have the same number of AVAs
+        # and the values are the same, even if in a different order
+        rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0]
+        rdn2 = ldap.dn.str2dn('ou=OU1+cn=CN1')[0]
+        self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2))
+
+    def test_rdn_multi_ava_diff_type(self):
+        # is_rdn_equal returns False if the RDNs have the same number of AVAs
+        # and the attribute types are different.
+        rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0]
+        rdn2 = ldap.dn.str2dn('cn=cn1+sn=sn1')[0]
+        self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2))
+
+    def test_rdn_attr_type_case_diff(self):
+        # is_rdn_equal returns True for same RDNs even when attr type case is
+        # different.
+        rdn1 = ldap.dn.str2dn('cn=cn1')[0]
+        rdn2 = ldap.dn.str2dn('CN=cn1')[0]
+        self.assertTrue(ks_ldap.is_rdn_equal(rdn1, rdn2))
+
+    def test_rdn_attr_type_alias(self):
+        # is_rdn_equal returns False when an attribute type alias is used,
+        # even though the RDNs are equivalent. Note that this is a limitation,
+        # since an LDAP server would consider them equal.
+        rdn1 = ldap.dn.str2dn('cn=cn1')[0]
+        rdn2 = ldap.dn.str2dn('2.5.4.3=cn1')[0]
+        self.assertFalse(ks_ldap.is_rdn_equal(rdn1, rdn2))
+
+    def test_dn_same(self):
+        # is_dn_equal returns True if the DNs are the same.
+        dn = 'cn=Babs Jansen,ou=OpenStack'
+        self.assertTrue(ks_ldap.is_dn_equal(dn, dn))
+
+    def test_dn_equal_unicode(self):
+        # is_dn_equal can accept unicode
+        dn = u'cn=fäké,ou=OpenStack'
+        self.assertTrue(ks_ldap.is_dn_equal(dn, dn))
+
+    def test_dn_diff_length(self):
+        # is_dn_equal returns False if the DNs don't have the same number of
+        # RDNs
+        dn1 = 'cn=Babs Jansen,ou=OpenStack'
+        dn2 = 'cn=Babs Jansen,ou=OpenStack,dc=example.com'
+        self.assertFalse(ks_ldap.is_dn_equal(dn1, dn2))
+
+    def test_dn_equal_rdns(self):
+        # is_dn_equal returns True if the DNs have the same number of RDNs
+        # and each RDN is the same.
+        dn1 = 'cn=Babs Jansen,ou=OpenStack+cn=OpenSource'
+        dn2 = 'CN=Babs Jansen,cn=OpenSource+ou=OpenStack'
+        self.assertTrue(ks_ldap.is_dn_equal(dn1, dn2))
+
+    def test_dn_parsed_dns(self):
+        # is_dn_equal can also accept parsed DNs.
+        dn_str1 = ldap.dn.str2dn('cn=Babs Jansen,ou=OpenStack+cn=OpenSource')
+        dn_str2 = ldap.dn.str2dn('CN=Babs Jansen,cn=OpenSource+ou=OpenStack')
+        self.assertTrue(ks_ldap.is_dn_equal(dn_str1, dn_str2))
+
+    def test_startswith_under_child(self):
+        # dn_startswith returns True if descendant_dn is a child of dn.
+        child = 'cn=Babs Jansen,ou=OpenStack'
+        parent = 'ou=OpenStack'
+        self.assertTrue(ks_ldap.dn_startswith(child, parent))
+
+    def test_startswith_parent(self):
+        # dn_startswith returns False if descendant_dn is a parent of dn.
+        child = 'cn=Babs Jansen,ou=OpenStack'
+        parent = 'ou=OpenStack'
+        self.assertFalse(ks_ldap.dn_startswith(parent, child))
+
+    def test_startswith_same(self):
+        # dn_startswith returns False if DNs are the same.
+        dn = 'cn=Babs Jansen,ou=OpenStack'
+        self.assertFalse(ks_ldap.dn_startswith(dn, dn))
+
+    def test_startswith_not_parent(self):
+        # dn_startswith returns False if descendant_dn is not under the dn
+        child = 'cn=Babs Jansen,ou=OpenStack'
+        parent = 'dc=example.com'
+        self.assertFalse(ks_ldap.dn_startswith(child, parent))
+
+    def test_startswith_descendant(self):
+        # dn_startswith returns True if descendant_dn is a descendant of dn.
+        descendant = 'cn=Babs Jansen,ou=Keystone,ou=OpenStack,dc=example.com'
+        dn = 'ou=OpenStack,dc=example.com'
+        self.assertTrue(ks_ldap.dn_startswith(descendant, dn))
+
+        descendant = 'uid=12345,ou=Users,dc=example,dc=com'
+        dn = 'ou=Users,dc=example,dc=com'
+        self.assertTrue(ks_ldap.dn_startswith(descendant, dn))
+
+    def test_startswith_parsed_dns(self):
+        # dn_startswith also accepts parsed DNs.
+        descendant = ldap.dn.str2dn('cn=Babs Jansen,ou=OpenStack')
+        dn = ldap.dn.str2dn('ou=OpenStack')
+        self.assertTrue(ks_ldap.dn_startswith(descendant, dn))
+
+    def test_startswith_unicode(self):
+        # dn_startswith accepts unicode.
+        child = u'cn=fäké,ou=OpenStäck'
+        parent = u'ou=OpenStäck'
+        self.assertTrue(ks_ldap.dn_startswith(child, parent))
+
+
+class LDAPDeleteTreeTest(tests.TestCase):
+
+    def setUp(self):
+        super(LDAPDeleteTreeTest, self).setUp()
+
+        ks_ldap.register_handler('fake://',
+                                 fakeldap.FakeLdapNoSubtreeDelete)
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        self.addCleanup(self.clear_database)
+        self.addCleanup(common_ldap_core._HANDLERS.clear)
+
+    def clear_database(self):
+        for shelf in fakeldap.FakeShelves:
+            fakeldap.FakeShelves[shelf].clear()
+
+    def config_overrides(self):
+        super(LDAPDeleteTreeTest, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+
+    def config_files(self):
+        config_files = super(LDAPDeleteTreeTest, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
+        return config_files
+
+    def test_deleteTree(self):
+        """Test manually deleting a tree.
+
+        Few LDAP servers support CONTROL_DELETETREE.  This test
+        exercises the alternate code paths in BaseLdap.deleteTree.
+
+        """
+        conn = self.identity_api.user.get_connection()
+        id_attr = self.identity_api.user.id_attr
+        objclass = self.identity_api.user.object_class.lower()
+        tree_dn = self.identity_api.user.tree_dn
+
+        def create_entry(name, parent_dn=None):
+            if not parent_dn:
+                parent_dn = tree_dn
+            dn = '%s=%s,%s' % (id_attr, name, parent_dn)
+            attrs = [('objectclass', [objclass, 'ldapsubentry']),
+                     (id_attr, [name])]
+            conn.add_s(dn, attrs)
+            return dn
+
+        # create 3 entries like this:
+        # cn=base
+        # cn=child,cn=base
+        # cn=grandchild,cn=child,cn=base
+        # then attempt to deleteTree(cn=base)
+        base_id = 'base'
+        base_dn = create_entry(base_id)
+        child_dn = create_entry('child', base_dn)
+        grandchild_dn = create_entry('grandchild', child_dn)
+
+        # verify that the three entries were created
+        scope = ldap.SCOPE_SUBTREE
+        filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
+        entries = conn.search_s(base_dn, scope, filt,
+                                attrlist=common_ldap_core.DN_ONLY)
+        self.assertThat(entries, matchers.HasLength(3))
+        sort_ents = sorted([e[0] for e in entries], key=len, reverse=True)
+        self.assertEqual([grandchild_dn, child_dn, base_dn], sort_ents)
+
+        # verify that a non-leaf node can't be deleted directly by the
+        # LDAP server
+        self.assertRaises(ldap.NOT_ALLOWED_ON_NONLEAF,
+                          conn.delete_s, base_dn)
+        self.assertRaises(ldap.NOT_ALLOWED_ON_NONLEAF,
+                          conn.delete_s, child_dn)
+
+        # call our deleteTree implementation
+        self.identity_api.user.deleteTree(base_id)
+        self.assertRaises(ldap.NO_SUCH_OBJECT,
+                          conn.search_s, base_dn, ldap.SCOPE_BASE)
+        self.assertRaises(ldap.NO_SUCH_OBJECT,
+                          conn.search_s, child_dn, ldap.SCOPE_BASE)
+        self.assertRaises(ldap.NO_SUCH_OBJECT,
+                          conn.search_s, grandchild_dn, ldap.SCOPE_BASE)
+
+
+class SslTlsTest(tests.TestCase):
+    """Tests for the SSL/TLS functionality in keystone.common.ldap.core."""
+
+    @mock.patch.object(ks_ldap.core.KeystoneLDAPHandler, 'simple_bind_s')
+    @mock.patch.object(ldap.ldapobject.LDAPObject, 'start_tls_s')
+    def _init_ldap_connection(self, config, mock_ldap_one, mock_ldap_two):
+        # Attempt to connect to initialize python-ldap.
+        base_ldap = ks_ldap.BaseLdap(config)
+        base_ldap.get_connection()
+
+    def test_certfile_trust_tls(self):
+        # We need this to actually exist, so we create a tempfile.
+        (handle, certfile) = tempfile.mkstemp()
+        self.addCleanup(os.unlink, certfile)
+        self.addCleanup(os.close, handle)
+        self.config_fixture.config(group='ldap',
+                                   url='ldap://localhost',
+                                   use_tls=True,
+                                   tls_cacertfile=certfile)
+
+        self._init_ldap_connection(CONF)
+
+        # Ensure the cert trust option is set.
+        self.assertEqual(certfile, ldap.get_option(ldap.OPT_X_TLS_CACERTFILE))
+
+    def test_certdir_trust_tls(self):
+        # We need this to actually exist, so we create a tempdir.
+        certdir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, certdir)
+        self.config_fixture.config(group='ldap',
+                                   url='ldap://localhost',
+                                   use_tls=True,
+                                   tls_cacertdir=certdir)
+
+        self._init_ldap_connection(CONF)
+
+        # Ensure the cert trust option is set.
+        self.assertEqual(certdir, ldap.get_option(ldap.OPT_X_TLS_CACERTDIR))
+
+    def test_certfile_trust_ldaps(self):
+        # We need this to actually exist, so we create a tempfile.
+        (handle, certfile) = tempfile.mkstemp()
+        self.addCleanup(os.unlink, certfile)
+        self.addCleanup(os.close, handle)
+        self.config_fixture.config(group='ldap',
+                                   url='ldaps://localhost',
+                                   use_tls=False,
+                                   tls_cacertfile=certfile)
+
+        self._init_ldap_connection(CONF)
+
+        # Ensure the cert trust option is set.
+        self.assertEqual(certfile, ldap.get_option(ldap.OPT_X_TLS_CACERTFILE))
+
+    def test_certdir_trust_ldaps(self):
+        # We need this to actually exist, so we create a tempdir.
+        certdir = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, certdir)
+        self.config_fixture.config(group='ldap',
+                                   url='ldaps://localhost',
+                                   use_tls=False,
+                                   tls_cacertdir=certdir)
+
+        self._init_ldap_connection(CONF)
+
+        # Ensure the cert trust option is set.
+        self.assertEqual(certdir, ldap.get_option(ldap.OPT_X_TLS_CACERTDIR))
+
+
+class LDAPPagedResultsTest(tests.TestCase):
+    """Tests the paged results functionality in keystone.common.ldap.core."""
+
+    def setUp(self):
+        super(LDAPPagedResultsTest, self).setUp()
+        self.clear_database()
+
+        ks_ldap.register_handler('fake://', fakeldap.FakeLdap)
+        self.addCleanup(common_ldap_core._HANDLERS.clear)
+
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+    def clear_database(self):
+        for shelf in fakeldap.FakeShelves:
+            fakeldap.FakeShelves[shelf].clear()
+
+    def config_overrides(self):
+        super(LDAPPagedResultsTest, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+
+    def config_files(self):
+        config_files = super(LDAPPagedResultsTest, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
+        return config_files
+
+    @mock.patch.object(fakeldap.FakeLdap, 'search_ext')
+    @mock.patch.object(fakeldap.FakeLdap, 'result3')
+    def test_paged_results_control_api(self, mock_result3, mock_search_ext):
+        mock_result3.return_value = ('', [], 1, [])
+
+        self.config_fixture.config(group='ldap',
+                                   page_size=1)
+
+        conn = self.identity_api.user.get_connection()
+        conn._paged_search_s('dc=example,dc=test',
+                             ldap.SCOPE_SUBTREE,
+                             'objectclass=*')
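+        # For reference, a rough sketch (not executed here) of how a
+        # paged search is driven with python-ldap directly; the
+        # SimplePagedResultsControl constructor signature differs between
+        # python-ldap releases, so treat this as illustrative only:
+        #
+        #     from ldap.controls import SimplePagedResultsControl
+        #     control = SimplePagedResultsControl(True, size=1, cookie='')
+        #     msgid = conn.search_ext('dc=example,dc=test',
+        #                             ldap.SCOPE_SUBTREE, 'objectclass=*',
+        #                             serverctrls=[control])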
+
+
+class CommonLdapTestCase(tests.BaseTestCase):
+    """These test cases call functions in keystone.common.ldap."""
+
+    def test_binary_attribute_values(self):
+        result = [(
+            'cn=junk,dc=example,dc=com',
+            {
+                'cn': ['junk'],
+                'sn': [uuid.uuid4().hex],
+                'mail': [uuid.uuid4().hex],
+                'binary_attr': ['\x00\xFF\x00\xFF']
+            }
+        ), ]
+        py_result = ks_ldap.convert_ldap_result(result)
+        # The attribute containing the binary value should
+        # not be present in the converted result.
+        self.assertNotIn('binary_attr', py_result[0][1])
+
+    def test_utf8_conversion(self):
+        value_unicode = u'fäké1'
+        value_utf8 = value_unicode.encode('utf-8')
+
+        result_utf8 = ks_ldap.utf8_encode(value_unicode)
+        self.assertEqual(value_utf8, result_utf8)
+
+        result_utf8 = ks_ldap.utf8_encode(value_utf8)
+        self.assertEqual(value_utf8, result_utf8)
+
+        result_unicode = ks_ldap.utf8_decode(value_utf8)
+        self.assertEqual(value_unicode, result_unicode)
+
+        result_unicode = ks_ldap.utf8_decode(value_unicode)
+        self.assertEqual(value_unicode, result_unicode)
+
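+        # Note the asymmetry exercised below: utf8_encode rejects
+        # non-string input, while utf8_decode coerces it to unicode.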
+        self.assertRaises(TypeError,
+                          ks_ldap.utf8_encode,
+                          100)
+
+        result_unicode = ks_ldap.utf8_decode(100)
+        self.assertEqual(u'100', result_unicode)
+
+    def test_user_id_begins_with_0(self):
+        user_id = '0123456'
+        result = [(
+            'cn=dummy,dc=example,dc=com',
+            {
+                'user_id': [user_id],
+                'enabled': ['TRUE']
+            }
+        ), ]
+        py_result = ks_ldap.convert_ldap_result(result)
+        # The user id should be 0123456, and the enabled
+        # flag should be True
+        self.assertIs(py_result[0][1]['enabled'][0], True)
+        self.assertEqual(user_id, py_result[0][1]['user_id'][0])
+
+    def test_user_id_begins_with_0_and_enabled_bit_mask(self):
+        user_id = '0123456'
+        bitmask = '225'
+        expected_bitmask = 225
+        result = [(
+            'cn=dummy,dc=example,dc=com',
+            {
+                'user_id': [user_id],
+                'enabled': [bitmask]
+            }
+        ), ]
+        py_result = ks_ldap.convert_ldap_result(result)
+        # The user id should be 0123456, and the enabled
+        # flag should be 225
+        self.assertEqual(expected_bitmask, py_result[0][1]['enabled'][0])
+        self.assertEqual(user_id, py_result[0][1]['user_id'][0])
+
+    def test_user_id_and_bitmask_begins_with_0(self):
+        user_id = '0123456'
+        bitmask = '0225'
+        expected_bitmask = 225
+        result = [(
+            'cn=dummy,dc=example,dc=com',
+            {
+                'user_id': [user_id],
+                'enabled': [bitmask]
+            }
+        ), ]
+        py_result = ks_ldap.convert_ldap_result(result)
+        # The user id should be 0123456, and the enabled
+        # flag should be 225, the 0 is dropped.
+        self.assertEqual(expected_bitmask, py_result[0][1]['enabled'][0])
+        self.assertEqual(user_id, py_result[0][1]['user_id'][0])
+
+    def test_user_id_and_user_name_with_boolean_string(self):
+        boolean_strings = ['TRUE', 'FALSE', 'true', 'false', 'True', 'False',
+                           'TrUe', 'FaLse']
+        for user_name in boolean_strings:
+            user_id = uuid.uuid4().hex
+            result = [(
+                'cn=dummy,dc=example,dc=com',
+                {
+                    'user_id': [user_id],
+                    'user_name': [user_name]
+                }
+            ), ]
+            py_result = ks_ldap.convert_ldap_result(result)
+            # The user name should still be a string value.
+            self.assertEqual(user_name, py_result[0][1]['user_name'][0])
diff --git a/keystone-moon/keystone/tests/unit/common/test_notifications.py b/keystone-moon/keystone/tests/unit/common/test_notifications.py
new file mode 100644 (file)
index 0000000..55dd556
--- /dev/null
@@ -0,0 +1,974 @@
+# Copyright 2013 IBM Corp.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License"); you may
+#   not use this file except in compliance with the License. You may obtain
+#   a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#   License for the specific language governing permissions and limitations
+#   under the License.
+
+import logging
+import uuid
+
+import mock
+from oslo_config import cfg
+from oslo_config import fixture as config_fixture
+from oslotest import mockpatch
+from pycadf import cadftaxonomy
+from pycadf import cadftype
+from pycadf import eventfactory
+from pycadf import resource as cadfresource
+import testtools
+
+from keystone.common import dependency
+from keystone import notifications
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+
+EXP_RESOURCE_TYPE = uuid.uuid4().hex
+CREATED_OPERATION = notifications.ACTIONS.created
+UPDATED_OPERATION = notifications.ACTIONS.updated
+DELETED_OPERATION = notifications.ACTIONS.deleted
+DISABLED_OPERATION = notifications.ACTIONS.disabled
+
+
+class ArbitraryException(Exception):
+    pass
+
+
+def register_callback(operation, resource_type=EXP_RESOURCE_TYPE):
+    """Helper for creating and registering a mock callback.
+
+    """
+    callback = mock.Mock(__name__='callback',
+                         im_class=mock.Mock(__name__='class'))
+    notifications.register_event_callback(operation, resource_type, callback)
+    return callback
+
+
+class AuditNotificationsTestCase(testtools.TestCase):
+    def setUp(self):
+        super(AuditNotificationsTestCase, self).setUp()
+        self.config_fixture = self.useFixture(config_fixture.Config(CONF))
+        self.addCleanup(notifications.clear_subscribers)
+
+    def _test_notification_operation(self, notify_function, operation):
+        exp_resource_id = uuid.uuid4().hex
+        callback = register_callback(operation)
+        notify_function(EXP_RESOURCE_TYPE, exp_resource_id)
+        callback.assert_called_once_with('identity', EXP_RESOURCE_TYPE,
+                                         operation,
+                                         {'resource_info': exp_resource_id})
+        self.config_fixture.config(notification_format='cadf')
+        with mock.patch(
+                'keystone.notifications._create_cadf_payload') as cadf_notify:
+            notify_function(EXP_RESOURCE_TYPE, exp_resource_id)
+            initiator = None
+            cadf_notify.assert_called_once_with(
+                operation, EXP_RESOURCE_TYPE, exp_resource_id,
+                notifications.taxonomy.OUTCOME_SUCCESS, initiator)
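+            # A second, non-public call should not emit another CADF
+            # payload, so the mock must still report exactly one call.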
+            notify_function(EXP_RESOURCE_TYPE, exp_resource_id, public=False)
+            cadf_notify.assert_called_once_with(
+                operation, EXP_RESOURCE_TYPE, exp_resource_id,
+                notifications.taxonomy.OUTCOME_SUCCESS, initiator)
+
+    def test_resource_created_notification(self):
+        self._test_notification_operation(notifications.Audit.created,
+                                          CREATED_OPERATION)
+
+    def test_resource_updated_notification(self):
+        self._test_notification_operation(notifications.Audit.updated,
+                                          UPDATED_OPERATION)
+
+    def test_resource_deleted_notification(self):
+        self._test_notification_operation(notifications.Audit.deleted,
+                                          DELETED_OPERATION)
+
+    def test_resource_disabled_notification(self):
+        self._test_notification_operation(notifications.Audit.disabled,
+                                          DISABLED_OPERATION)
+
+
+class NotificationsWrapperTestCase(testtools.TestCase):
+    def create_fake_ref(self):
+        resource_id = uuid.uuid4().hex
+        return resource_id, {
+            'id': resource_id,
+            'key': uuid.uuid4().hex
+        }
+
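+    # The notifications.created/updated/deleted decorators wrap a method
+    # and emit the corresponding event only when the method returns
+    # without raising; the *_exception tests below depend on that.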
+    @notifications.created(EXP_RESOURCE_TYPE)
+    def create_resource(self, resource_id, data):
+        return data
+
+    def test_resource_created_notification(self):
+        exp_resource_id, data = self.create_fake_ref()
+        callback = register_callback(CREATED_OPERATION)
+
+        self.create_resource(exp_resource_id, data)
+        callback.assert_called_with('identity', EXP_RESOURCE_TYPE,
+                                    CREATED_OPERATION,
+                                    {'resource_info': exp_resource_id})
+
+    @notifications.updated(EXP_RESOURCE_TYPE)
+    def update_resource(self, resource_id, data):
+        return data
+
+    def test_resource_updated_notification(self):
+        exp_resource_id, data = self.create_fake_ref()
+        callback = register_callback(UPDATED_OPERATION)
+
+        self.update_resource(exp_resource_id, data)
+        callback.assert_called_with('identity', EXP_RESOURCE_TYPE,
+                                    UPDATED_OPERATION,
+                                    {'resource_info': exp_resource_id})
+
+    @notifications.deleted(EXP_RESOURCE_TYPE)
+    def delete_resource(self, resource_id):
+        pass
+
+    def test_resource_deleted_notification(self):
+        exp_resource_id = uuid.uuid4().hex
+        callback = register_callback(DELETED_OPERATION)
+
+        self.delete_resource(exp_resource_id)
+        callback.assert_called_with('identity', EXP_RESOURCE_TYPE,
+                                    DELETED_OPERATION,
+                                    {'resource_info': exp_resource_id})
+
+    @notifications.created(EXP_RESOURCE_TYPE)
+    def create_exception(self, resource_id):
+        raise ArbitraryException()
+
+    def test_create_exception_without_notification(self):
+        callback = register_callback(CREATED_OPERATION)
+        self.assertRaises(
+            ArbitraryException, self.create_exception, uuid.uuid4().hex)
+        self.assertFalse(callback.called)
+
+    @notifications.created(EXP_RESOURCE_TYPE)
+    def update_exception(self, resource_id):
+        raise ArbitraryException()
+
+    def test_update_exception_without_notification(self):
+        callback = register_callback(UPDATED_OPERATION)
+        self.assertRaises(
+            ArbitraryException, self.update_exception, uuid.uuid4().hex)
+        self.assertFalse(callback.called)
+
+    @notifications.deleted(EXP_RESOURCE_TYPE)
+    def delete_exception(self, resource_id):
+        raise ArbitraryException()
+
+    def test_delete_exception_without_notification(self):
+        callback = register_callback(DELETED_OPERATION)
+        self.assertRaises(
+            ArbitraryException, self.delete_exception, uuid.uuid4().hex)
+        self.assertFalse(callback.called)
+
+
+class NotificationsTestCase(testtools.TestCase):
+    def setUp(self):
+        super(NotificationsTestCase, self).setUp()
+
+        # these should use self.config_fixture.config(), but they haven't
+        # been registered yet
+        CONF.rpc_backend = 'fake'
+        CONF.notification_driver = ['fake']
+
+    def test_send_notification(self):
+        """Test the private method _send_notification to ensure event_type,
+           payload, and context are built and passed properly.
+        """
+        resource = uuid.uuid4().hex
+        resource_type = EXP_RESOURCE_TYPE
+        operation = CREATED_OPERATION
+
+        # NOTE(ldbragst): Even though notifications._send_notification
+        # contains no case-specific logic, this test verifies that the
+        # context is always empty and that the resource ID is included in
+        # the payload. It was agreed that context should be empty in
+        # Keystone's case, which is also noted in the
+        # /keystone/notifications.py module. This test ensures and
+        # maintains these conditions.
+        expected_args = [
+            {},  # empty context
+            'identity.%s.created' % resource_type,  # event_type
+            {'resource_info': resource},  # payload
+            'INFO',  # priority is always INFO...
+        ]
+
+        with mock.patch.object(notifications._get_notifier(),
+                               '_notify') as mocked:
+            notifications._send_notification(operation, resource_type,
+                                             resource)
+            mocked.assert_called_once_with(*expected_args)
+
+
+class BaseNotificationTest(test_v3.RestfulTestCase):
+
+    def setUp(self):
+        super(BaseNotificationTest, self).setUp()
+
+        self._notifications = []
+        self._audits = []
+
+        def fake_notify(operation, resource_type, resource_id,
+                        public=True):
+            note = {
+                'resource_id': resource_id,
+                'operation': operation,
+                'resource_type': resource_type,
+                'send_notification_called': True,
+                'public': public}
+            self._notifications.append(note)
+
+        self.useFixture(mockpatch.PatchObject(
+            notifications, '_send_notification', fake_notify))
+
+        def fake_audit(action, initiator, outcome, target,
+                       event_type, **kwargs):
+            service_security = cadftaxonomy.SERVICE_SECURITY
+
+            event = eventfactory.EventFactory().new_event(
+                eventType=cadftype.EVENTTYPE_ACTIVITY,
+                outcome=outcome,
+                action=action,
+                initiator=initiator,
+                target=target,
+                observer=cadfresource.Resource(typeURI=service_security))
+
+            for key, value in kwargs.items():
+                setattr(event, key, value)
+
+            audit = {
+                'payload': event.as_dict(),
+                'event_type': event_type,
+                'send_notification_called': True}
+            self._audits.append(audit)
+
+        self.useFixture(mockpatch.PatchObject(
+            notifications, '_send_audit_notification', fake_audit))
+
+    def _assert_last_note(self, resource_id, operation, resource_type):
+        # NOTE(stevemar): If 'basic' format is not used, then simply
+        # return since this assertion is not valid.
+        if CONF.notification_format != 'basic':
+            return
+        self.assertTrue(len(self._notifications) > 0)
+        note = self._notifications[-1]
+        self.assertEqual(note['operation'], operation)
+        self.assertEqual(note['resource_id'], resource_id)
+        self.assertEqual(note['resource_type'], resource_type)
+        self.assertTrue(note['send_notification_called'])
+
+    def _assert_last_audit(self, resource_id, operation, resource_type,
+                           target_uri):
+        # NOTE(stevemar): If 'cadf' format is not used, then simply
+        # return since this assertion is not valid.
+        if CONF.notification_format != 'cadf':
+            return
+        self.assertTrue(len(self._audits) > 0)
+        audit = self._audits[-1]
+        payload = audit['payload']
+        self.assertEqual(resource_id, payload['resource_info'])
+        action = '%s.%s' % (operation, resource_type)
+        self.assertEqual(action, payload['action'])
+        self.assertEqual(target_uri, payload['target']['typeURI'])
+        self.assertEqual(resource_id, payload['target']['id'])
+        event_type = '%s.%s.%s' % ('identity', resource_type, operation)
+        self.assertEqual(event_type, audit['event_type'])
+        self.assertTrue(audit['send_notification_called'])
+
+    def _assert_notify_not_sent(self, resource_id, operation, resource_type,
+                                public=True):
+        unexpected = {
+            'resource_id': resource_id,
+            'operation': operation,
+            'resource_type': resource_type,
+            'send_notification_called': True,
+            'public': public}
+        for note in self._notifications:
+            self.assertNotEqual(unexpected, note)
+
+    def _assert_notify_sent(self, resource_id, operation, resource_type,
+                            public=True):
+        expected = {
+            'resource_id': resource_id,
+            'operation': operation,
+            'resource_type': resource_type,
+            'send_notification_called': True,
+            'public': public}
+        for note in self._notifications:
+            if expected == note:
+                break
+        else:
+            self.fail("Notification not sent.")
+
+
+class NotificationsForEntities(BaseNotificationTest):
+
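+    # Each test emits both assertions; _assert_last_note and
+    # _assert_last_audit are self-gating on CONF.notification_format, so
+    # only the assertion matching the active format actually checks
+    # anything (see BaseNotificationTest above).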
+    def test_create_group(self):
+        group_ref = self.new_group_ref(domain_id=self.domain_id)
+        group_ref = self.identity_api.create_group(group_ref)
+        self._assert_last_note(group_ref['id'], CREATED_OPERATION, 'group')
+        self._assert_last_audit(group_ref['id'], CREATED_OPERATION, 'group',
+                                cadftaxonomy.SECURITY_GROUP)
+
+    def test_create_project(self):
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        self.assignment_api.create_project(project_ref['id'], project_ref)
+        self._assert_last_note(
+            project_ref['id'], CREATED_OPERATION, 'project')
+        self._assert_last_audit(project_ref['id'], CREATED_OPERATION,
+                                'project', cadftaxonomy.SECURITY_PROJECT)
+
+    def test_create_role(self):
+        role_ref = self.new_role_ref()
+        self.role_api.create_role(role_ref['id'], role_ref)
+        self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role')
+        self._assert_last_audit(role_ref['id'], CREATED_OPERATION, 'role',
+                                cadftaxonomy.SECURITY_ROLE)
+
+    def test_create_user(self):
+        user_ref = self.new_user_ref(domain_id=self.domain_id)
+        user_ref = self.identity_api.create_user(user_ref)
+        self._assert_last_note(user_ref['id'], CREATED_OPERATION, 'user')
+        self._assert_last_audit(user_ref['id'], CREATED_OPERATION, 'user',
+                                cadftaxonomy.SECURITY_ACCOUNT_USER)
+
+    def test_create_trust(self):
+        trustor = self.new_user_ref(domain_id=self.domain_id)
+        trustor = self.identity_api.create_user(trustor)
+        trustee = self.new_user_ref(domain_id=self.domain_id)
+        trustee = self.identity_api.create_user(trustee)
+        role_ref = self.new_role_ref()
+        self.role_api.create_role(role_ref['id'], role_ref)
+        trust_ref = self.new_trust_ref(trustor['id'],
+                                       trustee['id'])
+        self.trust_api.create_trust(trust_ref['id'],
+                                    trust_ref,
+                                    [role_ref])
+        self._assert_last_note(
+            trust_ref['id'], CREATED_OPERATION, 'OS-TRUST:trust')
+        self._assert_last_audit(trust_ref['id'], CREATED_OPERATION,
+                                'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST)
+
+    def test_delete_group(self):
+        group_ref = self.new_group_ref(domain_id=self.domain_id)
+        group_ref = self.identity_api.create_group(group_ref)
+        self.identity_api.delete_group(group_ref['id'])
+        self._assert_last_note(group_ref['id'], DELETED_OPERATION, 'group')
+        self._assert_last_audit(group_ref['id'], DELETED_OPERATION, 'group',
+                                cadftaxonomy.SECURITY_GROUP)
+
+    def test_delete_project(self):
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        self.assignment_api.create_project(project_ref['id'], project_ref)
+        self.assignment_api.delete_project(project_ref['id'])
+        self._assert_last_note(
+            project_ref['id'], DELETED_OPERATION, 'project')
+        self._assert_last_audit(project_ref['id'], DELETED_OPERATION,
+                                'project', cadftaxonomy.SECURITY_PROJECT)
+
+    def test_delete_role(self):
+        role_ref = self.new_role_ref()
+        self.role_api.create_role(role_ref['id'], role_ref)
+        self.role_api.delete_role(role_ref['id'])
+        self._assert_last_note(role_ref['id'], DELETED_OPERATION, 'role')
+        self._assert_last_audit(role_ref['id'], DELETED_OPERATION, 'role',
+                                cadftaxonomy.SECURITY_ROLE)
+
+    def test_delete_user(self):
+        user_ref = self.new_user_ref(domain_id=self.domain_id)
+        user_ref = self.identity_api.create_user(user_ref)
+        self.identity_api.delete_user(user_ref['id'])
+        self._assert_last_note(user_ref['id'], DELETED_OPERATION, 'user')
+        self._assert_last_audit(user_ref['id'], DELETED_OPERATION, 'user',
+                                cadftaxonomy.SECURITY_ACCOUNT_USER)
+
+    def test_create_domain(self):
+        domain_ref = self.new_domain_ref()
+        self.resource_api.create_domain(domain_ref['id'], domain_ref)
+        self._assert_last_note(domain_ref['id'], CREATED_OPERATION, 'domain')
+        self._assert_last_audit(domain_ref['id'], CREATED_OPERATION, 'domain',
+                                cadftaxonomy.SECURITY_DOMAIN)
+
+    def test_update_domain(self):
+        domain_ref = self.new_domain_ref()
+        self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+        domain_ref['description'] = uuid.uuid4().hex
+        self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+        self._assert_last_note(domain_ref['id'], UPDATED_OPERATION, 'domain')
+        self._assert_last_audit(domain_ref['id'], UPDATED_OPERATION, 'domain',
+                                cadftaxonomy.SECURITY_DOMAIN)
+
+    def test_delete_domain(self):
+        domain_ref = self.new_domain_ref()
+        self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+        domain_ref['enabled'] = False
+        self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+        self.assignment_api.delete_domain(domain_ref['id'])
+        self._assert_last_note(domain_ref['id'], DELETED_OPERATION, 'domain')
+        self._assert_last_audit(domain_ref['id'], DELETED_OPERATION, 'domain',
+                                cadftaxonomy.SECURITY_DOMAIN)
+
+    def test_delete_trust(self):
+        trustor = self.new_user_ref(domain_id=self.domain_id)
+        trustor = self.identity_api.create_user(trustor)
+        trustee = self.new_user_ref(domain_id=self.domain_id)
+        trustee = self.identity_api.create_user(trustee)
+        role_ref = self.new_role_ref()
+        trust_ref = self.new_trust_ref(trustor['id'], trustee['id'])
+        self.trust_api.create_trust(trust_ref['id'],
+                                    trust_ref,
+                                    [role_ref])
+        self.trust_api.delete_trust(trust_ref['id'])
+        self._assert_last_note(
+            trust_ref['id'], DELETED_OPERATION, 'OS-TRUST:trust')
+        self._assert_last_audit(trust_ref['id'], DELETED_OPERATION,
+                                'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST)
+
+    def test_create_endpoint(self):
+        endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
+        self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref)
+        self._assert_notify_sent(endpoint_ref['id'], CREATED_OPERATION,
+                                 'endpoint')
+        self._assert_last_audit(endpoint_ref['id'], CREATED_OPERATION,
+                                'endpoint', cadftaxonomy.SECURITY_ENDPOINT)
+
+    def test_update_endpoint(self):
+        endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
+        self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref)
+        self.catalog_api.update_endpoint(endpoint_ref['id'], endpoint_ref)
+        self._assert_notify_sent(endpoint_ref['id'], UPDATED_OPERATION,
+                                 'endpoint')
+        self._assert_last_audit(endpoint_ref['id'], UPDATED_OPERATION,
+                                'endpoint', cadftaxonomy.SECURITY_ENDPOINT)
+
+    def test_delete_endpoint(self):
+        endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
+        self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref)
+        self.catalog_api.delete_endpoint(endpoint_ref['id'])
+        self._assert_notify_sent(endpoint_ref['id'], DELETED_OPERATION,
+                                 'endpoint')
+        self._assert_last_audit(endpoint_ref['id'], DELETED_OPERATION,
+                                'endpoint', cadftaxonomy.SECURITY_ENDPOINT)
+
+    def test_create_service(self):
+        service_ref = self.new_service_ref()
+        self.catalog_api.create_service(service_ref['id'], service_ref)
+        self._assert_notify_sent(service_ref['id'], CREATED_OPERATION,
+                                 'service')
+        self._assert_last_audit(service_ref['id'], CREATED_OPERATION,
+                                'service', cadftaxonomy.SECURITY_SERVICE)
+
+    def test_update_service(self):
+        service_ref = self.new_service_ref()
+        self.catalog_api.create_service(service_ref['id'], service_ref)
+        self.catalog_api.update_service(service_ref['id'], service_ref)
+        self._assert_notify_sent(service_ref['id'], UPDATED_OPERATION,
+                                 'service')
+        self._assert_last_audit(service_ref['id'], UPDATED_OPERATION,
+                                'service', cadftaxonomy.SECURITY_SERVICE)
+
+    def test_delete_service(self):
+        service_ref = self.new_service_ref()
+        self.catalog_api.create_service(service_ref['id'], service_ref)
+        self.catalog_api.delete_service(service_ref['id'])
+        self._assert_notify_sent(service_ref['id'], DELETED_OPERATION,
+                                 'service')
+        self._assert_last_audit(service_ref['id'], DELETED_OPERATION,
+                                'service', cadftaxonomy.SECURITY_SERVICE)
+
+    def test_create_region(self):
+        region_ref = self.new_region_ref()
+        self.catalog_api.create_region(region_ref)
+        self._assert_notify_sent(region_ref['id'], CREATED_OPERATION,
+                                 'region')
+        self._assert_last_audit(region_ref['id'], CREATED_OPERATION,
+                                'region', cadftaxonomy.SECURITY_REGION)
+
+    def test_update_region(self):
+        region_ref = self.new_region_ref()
+        self.catalog_api.create_region(region_ref)
+        self.catalog_api.update_region(region_ref['id'], region_ref)
+        self._assert_notify_sent(region_ref['id'], UPDATED_OPERATION,
+                                 'region')
+        self._assert_last_audit(region_ref['id'], UPDATED_OPERATION,
+                                'region', cadftaxonomy.SECURITY_REGION)
+
+    def test_delete_region(self):
+        region_ref = self.new_region_ref()
+        self.catalog_api.create_region(region_ref)
+        self.catalog_api.delete_region(region_ref['id'])
+        self._assert_notify_sent(region_ref['id'], DELETED_OPERATION,
+                                 'region')
+        self._assert_last_audit(region_ref['id'], DELETED_OPERATION,
+                                'region', cadftaxonomy.SECURITY_REGION)
+
+    def test_create_policy(self):
+        policy_ref = self.new_policy_ref()
+        self.policy_api.create_policy(policy_ref['id'], policy_ref)
+        self._assert_notify_sent(policy_ref['id'], CREATED_OPERATION,
+                                 'policy')
+        self._assert_last_audit(policy_ref['id'], CREATED_OPERATION,
+                                'policy', cadftaxonomy.SECURITY_POLICY)
+
+    def test_update_policy(self):
+        policy_ref = self.new_policy_ref()
+        self.policy_api.create_policy(policy_ref['id'], policy_ref)
+        self.policy_api.update_policy(policy_ref['id'], policy_ref)
+        self._assert_notify_sent(policy_ref['id'], UPDATED_OPERATION,
+                                 'policy')
+        self._assert_last_audit(policy_ref['id'], UPDATED_OPERATION,
+                                'policy', cadftaxonomy.SECURITY_POLICY)
+
+    def test_delete_policy(self):
+        policy_ref = self.new_policy_ref()
+        self.policy_api.create_policy(policy_ref['id'], policy_ref)
+        self.policy_api.delete_policy(policy_ref['id'])
+        self._assert_notify_sent(policy_ref['id'], DELETED_OPERATION,
+                                 'policy')
+        self._assert_last_audit(policy_ref['id'], DELETED_OPERATION,
+                                'policy', cadftaxonomy.SECURITY_POLICY)
+
+    def test_disable_domain(self):
+        domain_ref = self.new_domain_ref()
+        self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+        domain_ref['enabled'] = False
+        self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+        self._assert_notify_sent(domain_ref['id'], 'disabled', 'domain',
+                                 public=False)
+
+    def test_disable_of_disabled_domain_does_not_notify(self):
+        domain_ref = self.new_domain_ref()
+        domain_ref['enabled'] = False
+        self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+        # The domain_ref above is not changed during the create process. We
+        # can use the same ref to perform the update.
+        self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+        self._assert_notify_not_sent(domain_ref['id'], 'disabled', 'domain',
+                                     public=False)
+
+    def test_update_group(self):
+        group_ref = self.new_group_ref(domain_id=self.domain_id)
+        group_ref = self.identity_api.create_group(group_ref)
+        self.identity_api.update_group(group_ref['id'], group_ref)
+        self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group')
+        self._assert_last_audit(group_ref['id'], UPDATED_OPERATION, 'group',
+                                cadftaxonomy.SECURITY_GROUP)
+
+    def test_update_project(self):
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        self.assignment_api.create_project(project_ref['id'], project_ref)
+        self.assignment_api.update_project(project_ref['id'], project_ref)
+        self._assert_notify_sent(
+            project_ref['id'], UPDATED_OPERATION, 'project', public=True)
+        self._assert_last_audit(project_ref['id'], UPDATED_OPERATION,
+                                'project', cadftaxonomy.SECURITY_PROJECT)
+
+    def test_disable_project(self):
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        self.assignment_api.create_project(project_ref['id'], project_ref)
+        project_ref['enabled'] = False
+        self.assignment_api.update_project(project_ref['id'], project_ref)
+        self._assert_notify_sent(project_ref['id'], 'disabled', 'project',
+                                 public=False)
+
+    def test_disable_of_disabled_project_does_not_notify(self):
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        project_ref['enabled'] = False
+        self.assignment_api.create_project(project_ref['id'], project_ref)
+        # The project_ref above is not changed during the create process. We
+        # can use the same ref to perform the update.
+        self.assignment_api.update_project(project_ref['id'], project_ref)
+        self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project',
+                                     public=False)
+
+    def test_update_project_does_not_send_disable(self):
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        self.assignment_api.create_project(project_ref['id'], project_ref)
+        project_ref['enabled'] = True
+        self.assignment_api.update_project(project_ref['id'], project_ref)
+        self._assert_last_note(
+            project_ref['id'], UPDATED_OPERATION, 'project')
+        self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project')
+
+    def test_update_role(self):
+        role_ref = self.new_role_ref()
+        self.role_api.create_role(role_ref['id'], role_ref)
+        self.role_api.update_role(role_ref['id'], role_ref)
+        self._assert_last_note(role_ref['id'], UPDATED_OPERATION, 'role')
+        self._assert_last_audit(role_ref['id'], UPDATED_OPERATION, 'role',
+                                cadftaxonomy.SECURITY_ROLE)
+
+    def test_update_user(self):
+        user_ref = self.new_user_ref(domain_id=self.domain_id)
+        user_ref = self.identity_api.create_user(user_ref)
+        self.identity_api.update_user(user_ref['id'], user_ref)
+        self._assert_last_note(user_ref['id'], UPDATED_OPERATION, 'user')
+        self._assert_last_audit(user_ref['id'], UPDATED_OPERATION, 'user',
+                                cadftaxonomy.SECURITY_ACCOUNT_USER)
+
+    def test_config_option_no_events(self):
+        self.config_fixture.config(notification_format='basic')
+        role_ref = self.new_role_ref()
+        self.role_api.create_role(role_ref['id'], role_ref)
+        # The regular notifications will still be emitted, since they are
+        # used for callback handling.
+        self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role')
+        # No audit event should have occurred
+        self.assertEqual(0, len(self._audits))
+
+
+class CADFNotificationsForEntities(NotificationsForEntities):
+
+    def setUp(self):
+        super(CADFNotificationsForEntities, self).setUp()
+        self.config_fixture.config(notification_format='cadf')
+
+    def test_initiator_data_is_set(self):
+        ref = self.new_domain_ref()
+        resp = self.post('/domains', body={'domain': ref})
+        resource_id = resp.result.get('domain').get('id')
+        self._assert_last_audit(resource_id, CREATED_OPERATION, 'domain',
+                                cadftaxonomy.SECURITY_DOMAIN)
+        self.assertTrue(len(self._audits) > 0)
+        audit = self._audits[-1]
+        payload = audit['payload']
+        self.assertEqual(self.user_id, payload['initiator']['id'])
+        self.assertEqual(self.project_id, payload['initiator']['project_id'])
+
+
+class TestEventCallbacks(test_v3.RestfulTestCase):
+
+    def setUp(self):
+        super(TestEventCallbacks, self).setUp()
+        self.has_been_called = False
+
+    def _project_deleted_callback(self, service, resource_type, operation,
+                                  payload):
+        self.has_been_called = True
+
+    def _project_created_callback(self, service, resource_type, operation,
+                                  payload):
+        self.has_been_called = True
+
+    def test_notification_received(self):
+        callback = register_callback(CREATED_OPERATION, 'project')
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        self.assignment_api.create_project(project_ref['id'], project_ref)
+        self.assertTrue(callback.called)
+
+    def test_notification_method_not_callable(self):
+        fake_method = None
+        self.assertRaises(TypeError,
+                          notifications.register_event_callback,
+                          UPDATED_OPERATION,
+                          'project',
+                          [fake_method])
+
+    def test_notification_event_not_valid(self):
+        self.assertRaises(ValueError,
+                          notifications.register_event_callback,
+                          uuid.uuid4().hex,
+                          'project',
+                          self._project_deleted_callback)
+
+    def test_event_registration_for_unknown_resource_type(self):
+        # Registration for unknown resource types should succeed. If no event
+        # is issued for that resource type, the callback won't be triggered.
+        notifications.register_event_callback(DELETED_OPERATION,
+                                              uuid.uuid4().hex,
+                                              self._project_deleted_callback)
+        resource_type = uuid.uuid4().hex
+        notifications.register_event_callback(DELETED_OPERATION,
+                                              resource_type,
+                                              self._project_deleted_callback)
+
+    def test_provider_event_callbacks_subscription(self):
+        callback_called = []
+
+        @dependency.provider('foo_api')
+        class Foo(object):
+            def __init__(self):
+                self.event_callbacks = {
+                    CREATED_OPERATION: {'project': [self.foo_callback]}}
+
+            def foo_callback(self, service, resource_type, operation,
+                             payload):
+                # uses callback_called from the closure
+                callback_called.append(True)
+
+        Foo()
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        self.assignment_api.create_project(project_ref['id'], project_ref)
+        self.assertEqual([True], callback_called)
+
+    def test_invalid_event_callbacks(self):
+        @dependency.provider('foo_api')
+        class Foo(object):
+            def __init__(self):
+                self.event_callbacks = 'bogus'
+
+        self.assertRaises(ValueError, Foo)
+
+    def test_invalid_event_callbacks_event(self):
+        @dependency.provider('foo_api')
+        class Foo(object):
+            def __init__(self):
+                self.event_callbacks = {CREATED_OPERATION: 'bogus'}
+
+        self.assertRaises(ValueError, Foo)
+
+
+class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
+
+    LOCAL_HOST = 'localhost'
+    ACTION = 'authenticate'
+    ROLE_ASSIGNMENT = 'role_assignment'
+
+    def setUp(self):
+        super(CadfNotificationsWrapperTestCase, self).setUp()
+        self._notifications = []
+
+        def fake_notify(action, initiator, outcome, target,
+                        event_type, **kwargs):
+            service_security = cadftaxonomy.SERVICE_SECURITY
+
+            event = eventfactory.EventFactory().new_event(
+                eventType=cadftype.EVENTTYPE_ACTIVITY,
+                outcome=outcome,
+                action=action,
+                initiator=initiator,
+                target=target,
+                observer=cadfresource.Resource(typeURI=service_security))
+
+            for key, value in kwargs.items():
+                setattr(event, key, value)
+
+            note = {
+                'action': action,
+                'initiator': initiator,
+                'event': event,
+                'send_notification_called': True}
+            self._notifications.append(note)
+
+        self.useFixture(mockpatch.PatchObject(
+            notifications, '_send_audit_notification', fake_notify))
+
+    def _assert_last_note(self, action, user_id):
+        self.assertTrue(self._notifications)
+        note = self._notifications[-1]
+        self.assertEqual(note['action'], action)
+        initiator = note['initiator']
+        self.assertEqual(initiator.id, user_id)
+        self.assertEqual(initiator.host.address, self.LOCAL_HOST)
+        self.assertTrue(note['send_notification_called'])
+
+    def _assert_event(self, role_id, project=None, domain=None,
+                      user=None, group=None, inherit=False):
+        """Assert that the CADF event is valid.
+
+        In the case of role assignments, the event will have extra data,
+        specifically, the role, target, actor, and if the role is inherited.
+
+        An example event, as a dictionary, is shown below:
+            {
+                'typeURI': 'http://schemas.dmtf.org/cloud/audit/1.0/event',
+                'initiator': {
+                    'typeURI': 'service/security/account/user',
+                    'host': {'address': 'localhost'},
+                    'id': 'openstack:0a90d95d-582c-4efb-9cbc-e2ca7ca9c341',
+                    'name': u'bccc2d9bfc2a46fd9e33bcf82f0b5c21'
+                },
+                'target': {
+                    'typeURI': 'service/security/account/user',
+                    'id': 'openstack:d48ea485-ef70-4f65-8d2b-01aa9d7ec12d'
+                },
+                'observer': {
+                    'typeURI': 'service/security',
+                    'id': 'openstack:d51dd870-d929-4aba-8d75-dcd7555a0c95'
+                },
+                'eventType': 'activity',
+                'eventTime': '2014-08-21T21:04:56.204536+0000',
+                'role': u'0e6b990380154a2599ce6b6e91548a68',
+                'domain': u'24bdcff1aab8474895dbaac509793de1',
+                'inherited_to_projects': False,
+                'group': u'c1e22dc67cbd469ea0e33bf428fe597a',
+                'action': 'created.role_assignment',
+                'outcome': 'success',
+                'id': 'openstack:782689dd-f428-4f13-99c7-5c70f94a5ac1'
+            }
+        """
+
+        note = self._notifications[-1]
+        event = note['event']
+        if project:
+            self.assertEqual(project, event.project)
+        if domain:
+            self.assertEqual(domain, event.domain)
+        if user:
+            self.assertEqual(user, event.user)
+        if group:
+            self.assertEqual(group, event.group)
+        self.assertEqual(role_id, event.role)
+        self.assertEqual(inherit, event.inherited_to_projects)
+
+    def test_v3_authenticate_user_name_and_domain_id(self):
+        user_id = self.user_id
+        user_name = self.user['name']
+        password = self.user['password']
+        domain_id = self.domain_id
+        data = self.build_authentication_request(username=user_name,
+                                                 user_domain_id=domain_id,
+                                                 password=password)
+        self.post('/auth/tokens', body=data)
+        self._assert_last_note(self.ACTION, user_id)
+
+    def test_v3_authenticate_user_id(self):
+        user_id = self.user_id
+        password = self.user['password']
+        data = self.build_authentication_request(user_id=user_id,
+                                                 password=password)
+        self.post('/auth/tokens', body=data)
+        self._assert_last_note(self.ACTION, user_id)
+
+    def test_v3_authenticate_user_name_and_domain_name(self):
+        user_id = self.user_id
+        user_name = self.user['name']
+        password = self.user['password']
+        domain_name = self.domain['name']
+        data = self.build_authentication_request(username=user_name,
+                                                 user_domain_name=domain_name,
+                                                 password=password)
+        self.post('/auth/tokens', body=data)
+        self._assert_last_note(self.ACTION, user_id)
+
+    def _test_role_assignment(self, url, role, project=None, domain=None,
+                              user=None, group=None):
+        self.put(url)
+        action = "%s.%s" % (CREATED_OPERATION, self.ROLE_ASSIGNMENT)
+        self._assert_last_note(action, self.user_id)
+        self._assert_event(role, project, domain, user, group)
+        self.delete(url)
+        action = "%s.%s" % (DELETED_OPERATION, self.ROLE_ASSIGNMENT)
+        self._assert_last_note(action, self.user_id)
+        self._assert_event(role, project, domain, user, group)
+
+    def test_user_project_grant(self):
+        url = ('/projects/%s/users/%s/roles/%s' %
+               (self.project_id, self.user_id, self.role_id))
+        self._test_role_assignment(url, self.role_id,
+                                   project=self.project_id,
+                                   user=self.user_id)
+
+    def test_group_domain_grant(self):
+        group_ref = self.new_group_ref(domain_id=self.domain_id)
+        group = self.identity_api.create_group(group_ref)
+        url = ('/domains/%s/groups/%s/roles/%s' %
+               (self.domain_id, group['id'], self.role_id))
+        self._test_role_assignment(url, self.role_id,
+                                   domain=self.domain_id,
+                                   group=group['id'])
+
+
+class TestCallbackRegistration(testtools.TestCase):
+    def setUp(self):
+        super(TestCallbackRegistration, self).setUp()
+        self.mock_log = mock.Mock()
+        # Force the callback logging to occur
+        self.mock_log.logger.getEffectiveLevel.return_value = logging.DEBUG
+
+    def verify_log_message(self, data):
+        """Tests that use this are a little brittle because adding more
+        logging can break them.
+
+        TODO(dstanek): remove the need for this in a future refactoring
+
+        """
+        log_fn = self.mock_log.debug
+        self.assertEqual(len(data), log_fn.call_count)
+        for datum in data:
+            log_fn.assert_any_call(mock.ANY, datum)
+
+    def test_a_function_callback(self):
+        def callback(*args, **kwargs):
+            pass
+
+        resource_type = 'thing'
+        with mock.patch('keystone.notifications.LOG', self.mock_log):
+            notifications.register_event_callback(
+                CREATED_OPERATION, resource_type, callback)
+
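+        # The registration code logs the callback's dotted path, which
+        # for a local function resolves to this module-level name.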
+        callback = 'keystone.tests.unit.common.test_notifications.callback'
+        expected_log_data = {
+            'callback': callback,
+            'event': 'identity.%s.created' % resource_type
+        }
+        self.verify_log_message([expected_log_data])
+
+    def test_a_method_callback(self):
+        class C(object):
+            def callback(self, *args, **kwargs):
+                pass
+
+        with mock.patch('keystone.notifications.LOG', self.mock_log):
+            notifications.register_event_callback(
+                CREATED_OPERATION, 'thing', C.callback)
+
+        callback = 'keystone.tests.unit.common.test_notifications.C.callback'
+        expected_log_data = {
+            'callback': callback,
+            'event': 'identity.thing.created'
+        }
+        self.verify_log_message([expected_log_data])
+
+    def test_a_list_of_callbacks(self):
+        def callback(*args, **kwargs):
+            pass
+
+        class C(object):
+            def callback(self, *args, **kwargs):
+                pass
+
+        with mock.patch('keystone.notifications.LOG', self.mock_log):
+            notifications.register_event_callback(
+                CREATED_OPERATION, 'thing', [callback, C.callback])
+
+        callback_1 = 'keystone.tests.unit.common.test_notifications.callback'
+        callback_2 = 'keystone.tests.unit.common.test_notifications.C.callback'
+        expected_log_data = [
+            {
+                'callback': callback_1,
+                'event': 'identity.thing.created'
+            },
+            {
+                'callback': callback_2,
+                'event': 'identity.thing.created'
+            },
+        ]
+        self.verify_log_message(expected_log_data)
+
+    def test_an_invalid_callback(self):
+        # object() is not callable, so registration must raise TypeError.
+        self.assertRaises(TypeError,
+                          notifications.register_event_callback,
+                          CREATED_OPERATION, 'thing', object())
+
+    def test_an_invalid_event(self):
+        def callback(*args, **kwargs):
+            pass
+
+        self.assertRaises(ValueError,
+                          notifications.register_event_callback,
+                          uuid.uuid4().hex,
+                          'thing',
+                          callback)
diff --git a/keystone-moon/keystone/tests/unit/common/test_pemutils.py b/keystone-moon/keystone/tests/unit/common/test_pemutils.py
new file mode 100644 (file)
index 0000000..c2f5851
--- /dev/null
@@ -0,0 +1,337 @@
+# Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import base64
+
+from six import moves
+
+from keystone.common import pemutils
+from keystone.tests import unit as tests
+
+
+# List of 2-tuples, (pem_type, pem_header)
+headers = pemutils.PEM_TYPE_TO_HEADER.items()
+
+
+def make_data(size, offset=0):
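+    # Deterministic pseudo-binary filler: byte values cycle through
+    # 0..254, shifted by 'offset' so successive PEMs differ.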
+    return ''.join([chr(x % 255) for x in moves.range(offset, size + offset)])
+
+
+def make_base64_from_data(data):
+    return base64.b64encode(data)
+
+
+def wrap_base64(base64_text):
+    wrapped_text = '\n'.join([base64_text[x:x + 64]
+                              for x in moves.range(0, len(base64_text), 64)])
+    wrapped_text += '\n'
+    return wrapped_text
+
+
+def make_pem(header, data):
+    base64_text = make_base64_from_data(data)
+    wrapped_text = wrap_base64(base64_text)
+
+    result = '-----BEGIN %s-----\n' % header
+    result += wrapped_text
+    result += '-----END %s-----\n' % header
+
+    return result
+
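+# For reference, make_pem('CERTIFICATE', data) produces text of the form:
+#
+#     -----BEGIN CERTIFICATE-----
+#     <base64 of data, wrapped at 64 columns>
+#     -----END CERTIFICATE-----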
+
+class PEM(object):
+    """PEM text and it's associated data broken out, used for testing.
+
+    """
+    def __init__(self, pem_header='CERTIFICATE', pem_type='cert',
+                 data_size=70, data_offset=0):
+        self.pem_header = pem_header
+        self.pem_type = pem_type
+        self.data_size = data_size
+        self.data_offset = data_offset
+        self.data = make_data(self.data_size, self.data_offset)
+        self.base64_text = make_base64_from_data(self.data)
+        self.wrapped_base64 = wrap_base64(self.base64_text)
+        self.pem_text = make_pem(self.pem_header, self.data)
+
+
+class TestPEMParseResult(tests.BaseTestCase):
+
+    def test_pem_types(self):
+        for pem_type in pemutils.pem_types:
+            pem_header = pemutils.PEM_TYPE_TO_HEADER[pem_type]
+            r = pemutils.PEMParseResult(pem_type=pem_type)
+            self.assertEqual(pem_type, r.pem_type)
+            self.assertEqual(pem_header, r.pem_header)
+
+        pem_type = 'xxx'
+        self.assertRaises(ValueError,
+                          pemutils.PEMParseResult, pem_type=pem_type)
+
+    def test_pem_headers(self):
+        for pem_header in pemutils.pem_headers:
+            pem_type = pemutils.PEM_HEADER_TO_TYPE[pem_header]
+            r = pemutils.PEMParseResult(pem_header=pem_header)
+            self.assertEqual(pem_type, r.pem_type)
+            self.assertEqual(pem_header, r.pem_header)
+
+        pem_header = 'xxx'
+        self.assertRaises(ValueError,
+                          pemutils.PEMParseResult, pem_header=pem_header)
+
+
+class TestPEMParse(tests.BaseTestCase):
+    def test_parse_none(self):
+        text = ''
+        text += 'bla bla\n'
+        text += 'yada yada yada\n'
+        text += 'burfl blatz bingo\n'
+
+        parse_results = pemutils.parse_pem(text)
+        self.assertEqual(0, len(parse_results))
+
+        self.assertEqual(False, pemutils.is_pem(text))
+
+    def test_parse_invalid(self):
+        p = PEM(pem_type='xxx',
+                pem_header='XXX')
+        text = p.pem_text
+
+        self.assertRaises(ValueError,
+                          pemutils.parse_pem, text)
+
+    def test_parse_one(self):
+        data_size = 70
+        count = len(headers)
+        pems = []
+
+        for i in moves.range(count):
+            pems.append(PEM(pem_type=headers[i][0],
+                            pem_header=headers[i][1],
+                            data_size=data_size + i,
+                            data_offset=i))
+
+        for i in moves.range(count):
+            p = pems[i]
+            text = p.pem_text
+
+            parse_results = pemutils.parse_pem(text)
+            self.assertEqual(1, len(parse_results))
+
+            r = parse_results[0]
+            self.assertEqual(p.pem_type, r.pem_type)
+            self.assertEqual(p.pem_header, r.pem_header)
+            self.assertEqual(p.pem_text,
+                             text[r.pem_start:r.pem_end])
+            self.assertEqual(p.wrapped_base64,
+                             text[r.base64_start:r.base64_end])
+            self.assertEqual(p.data, r.binary_data)
+
+    def test_parse_one_embedded(self):
+        p = PEM(data_offset=0)
+        text = ''
+        text += 'bla bla\n'
+        text += 'yada yada yada\n'
+        text += p.pem_text
+        text += 'burfl blatz bingo\n'
+
+        parse_results = pemutils.parse_pem(text)
+        self.assertEqual(1, len(parse_results))
+
+        r = parse_results[0]
+        self.assertEqual(p.pem_type, r.pem_type)
+        self.assertEqual(p.pem_header, r.pem_header)
+        self.assertEqual(p.pem_text,
+                         text[r.pem_start:r.pem_end])
+        self.assertEqual(p.wrapped_base64,
+                         text[r.base64_start: r.base64_end])
+        self.assertEqual(p.data, r.binary_data)
+
+    def test_parse_multiple(self):
+        data_size = 70
+        count = len(headers)
+        pems = []
+        text = ''
+
+        for i in moves.range(count):
+            pems.append(PEM(pem_type=headers[i][0],
+                            pem_header=headers[i][1],
+                            data_size=data_size + i,
+                            data_offset=i))
+
+        for i in moves.range(count):
+            text += pems[i].pem_text
+
+        parse_results = pemutils.parse_pem(text)
+        self.assertEqual(count, len(parse_results))
+
+        for i in moves.range(count):
+            r = parse_results[i]
+            p = pems[i]
+
+            self.assertEqual(p.pem_type, r.pem_type)
+            self.assertEqual(p.pem_header, r.pem_header)
+            self.assertEqual(p.pem_text,
+                             text[r.pem_start:r.pem_end])
+            self.assertEqual(p.wrapped_base64,
+                             text[r.base64_start: r.base64_end])
+            self.assertEqual(p.data, r.binary_data)
+
+    def test_parse_multiple_find_specific(self):
+        data_size = 70
+        count = len(headers)
+        pems = []
+        text = ''
+
+        for i in moves.range(count):
+            pems.append(PEM(pem_type=headers[i][0],
+                            pem_header=headers[i][1],
+                            data_size=data_size + i,
+                            data_offset=i))
+
+        for i in moves.range(count):
+            text += pems[i].pem_text
+
+        for i in moves.range(count):
+            parse_results = pemutils.parse_pem(text, pem_type=headers[i][0])
+            self.assertEqual(1, len(parse_results))
+
+            r = parse_results[0]
+            p = pems[i]
+
+            self.assertEqual(p.pem_type, r.pem_type)
+            self.assertEqual(p.pem_header, r.pem_header)
+            self.assertEqual(p.pem_text,
+                             text[r.pem_start:r.pem_end])
+            self.assertEqual(p.wrapped_base64,
+                             text[r.base64_start:r.base64_end])
+            self.assertEqual(p.data, r.binary_data)
+
+    def test_parse_multiple_embedded(self):
+        data_size = 75
+        count = len(headers)
+        pems = []
+        text = ''
+
+        for i in moves.range(count):
+            pems.append(PEM(pem_type=headers[i][0],
+                            pem_header=headers[i][1],
+                            data_size=data_size + i,
+                            data_offset=i))
+
+        for i in moves.range(count):
+            text += 'bla bla\n'
+            text += 'yada yada yada\n'
+            text += pems[i].pem_text
+            text += 'burfl blatz bingo\n'
+
+        parse_results = pemutils.parse_pem(text)
+        self.assertEqual(count, len(parse_results))
+
+        for i in moves.range(count):
+            r = parse_results[i]
+            p = pems[i]
+
+            self.assertEqual(p.pem_type, r.pem_type)
+            self.assertEqual(p.pem_header, r.pem_header)
+            self.assertEqual(p.pem_text,
+                             text[r.pem_start:r.pem_end])
+            self.assertEqual(p.wrapped_base64,
+                             text[r.base64_start:r.base64_end])
+            self.assertEqual(p.data, r.binary_data)
+
+    def test_get_pem_data_none(self):
+        text = ''
+        text += 'bla bla\n'
+        text += 'yada yada yada\n'
+        text += 'burfl blatz bingo\n'
+
+        data = pemutils.get_pem_data(text)
+        self.assertIsNone(data)
+
+    def test_get_pem_data_invalid(self):
+        p = PEM(pem_type='xxx',
+                pem_header='XXX')
+        text = p.pem_text
+
+        self.assertRaises(ValueError,
+                          pemutils.get_pem_data, text)
+
+    def test_get_pem_data(self):
+        data_size = 70
+        count = len(headers)
+        pems = []
+
+        for i in moves.range(count):
+            pems.append(PEM(pem_type=headers[i][0],
+                            pem_header=headers[i][1],
+                            data_size=data_size + i,
+                            data_offset=i))
+
+        for i in moves.range(count):
+            p = pems[i]
+            text = p.pem_text
+
+            data = pemutils.get_pem_data(text, p.pem_type)
+            self.assertEqual(p.data, data)
+
+    def test_is_pem(self):
+        data_size = 70
+        count = len(headers)
+        pems = []
+
+        for i in moves.range(count):
+            pems.append(PEM(pem_type=headers[i][0],
+                            pem_header=headers[i][1],
+                            data_size=data_size + i,
+                            data_offset=i))
+
+        for i in moves.range(count):
+            p = pems[i]
+            text = p.pem_text
+            self.assertTrue(pemutils.is_pem(text, pem_type=p.pem_type))
+            self.assertFalse(pemutils.is_pem(text,
+                                             pem_type=p.pem_type + 'xxx'))
+
+    def test_base64_to_pem(self):
+        data_size = 70
+        count = len(headers)
+        pems = []
+
+        for i in moves.range(count):
+            pems.append(PEM(pem_type=headers[i][0],
+                            pem_header=headers[i][1],
+                            data_size=data_size + i,
+                            data_offset=i))
+
+        for i in moves.range(count):
+            p = pems[i]
+            pem = pemutils.base64_to_pem(p.base64_text, p.pem_type)
+            self.assertEqual(pemutils.get_pem_data(pem, p.pem_type), p.data)
+
+    def test_binary_to_pem(self):
+        data_size = 70
+        count = len(headers)
+        pems = []
+
+        for i in moves.range(count):
+            pems.append(PEM(pem_type=headers[i][0],
+                            pem_header=headers[i][1],
+                            data_size=data_size + i,
+                            data_offset=i))
+
+        for i in moves.range(count):
+            p = pems[i]
+            pem = pemutils.binary_to_pem(p.data, p.pem_type)
+            self.assertEqual(pemutils.get_pem_data(pem, p.pem_type), p.data)
diff --git a/keystone-moon/keystone/tests/unit/common/test_sql_core.py b/keystone-moon/keystone/tests/unit/common/test_sql_core.py
new file mode 100644 (file)
index 0000000..1f33cfc
--- /dev/null
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy.ext import declarative
+
+from keystone.common import sql
+from keystone.tests import unit as tests
+from keystone.tests.unit import utils
+
+
+ModelBase = declarative.declarative_base()
+
+
+class TestModel(ModelBase, sql.ModelDictMixin):
+    __tablename__ = 'testmodel'
+    id = sql.Column(sql.String(64), primary_key=True)
+    text = sql.Column(sql.String(64), nullable=False)
+
+
+class TestModelDictMixin(tests.BaseTestCase):
+
+    def test_creating_a_model_instance_from_a_dict(self):
+        d = {'id': utils.new_uuid(), 'text': utils.new_uuid()}
+        m = TestModel.from_dict(d)
+        self.assertEqual(m.id, d['id'])
+        self.assertEqual(m.text, d['text'])
+
+    def test_creating_a_dict_from_a_model_instance(self):
+        m = TestModel(id=utils.new_uuid(), text=utils.new_uuid())
+        d = m.to_dict()
+        self.assertEqual(m.id, d['id'])
+        self.assertEqual(m.text, d['text'])
+
+    def test_creating_a_model_instance_from_an_invalid_dict(self):
+        d = {'id': utils.new_uuid(), 'text': utils.new_uuid(), 'extra': None}
+        self.assertRaises(TypeError, TestModel.from_dict, d)
+
+    def test_creating_a_dict_from_a_model_instance_that_has_extra_attrs(self):
+        expected = {'id': utils.new_uuid(), 'text': utils.new_uuid()}
+        m = TestModel(id=expected['id'], text=expected['text'])
+        m.extra = 'this should not be in the dictionary'
+        self.assertEqual(m.to_dict(), expected)
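+
+# In short, as the tests above demonstrate: ModelDictMixin gives a
+# declarative model symmetric from_dict()/to_dict() helpers, where
+# from_dict() rejects unknown keys with TypeError and to_dict()
+# serializes only the mapped columns, ignoring extra instance attributes.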
diff --git a/keystone-moon/keystone/tests/unit/common/test_utils.py b/keystone-moon/keystone/tests/unit/common/test_utils.py
new file mode 100644 (file)
index 0000000..184c814
--- /dev/null
@@ -0,0 +1,164 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo_config import cfg
+from oslo_config import fixture as config_fixture
+from oslo_serialization import jsonutils
+
+from keystone.common import utils as common_utils
+from keystone import exception
+from keystone import service
+from keystone.tests import unit as tests
+from keystone.tests.unit import utils
+
+
+CONF = cfg.CONF
+
+TZ = utils.TZ
+
+
+class UtilsTestCase(tests.BaseTestCase):
+    OPTIONAL = object()
+
+    def setUp(self):
+        super(UtilsTestCase, self).setUp()
+        self.config_fixture = self.useFixture(config_fixture.Config(CONF))
+
+    def test_hash(self):
+        password = 'right'
+        wrong = 'wrongwrong'  # Two wrongs don't make a right
+        hashed = common_utils.hash_password(password)
+        self.assertTrue(common_utils.check_password(password, hashed))
+        self.assertFalse(common_utils.check_password(wrong, hashed))
+
+    def test_verify_normal_password_strict(self):
+        self.config_fixture.config(strict_password_check=False)
+        password = uuid.uuid4().hex
+        verified = common_utils.verify_length_and_trunc_password(password)
+        self.assertEqual(password, verified)
+
+    def test_that_a_hash_can_not_be_validated_against_a_hash(self):
+        # NOTE(dstanek): Bug 1279849 reported a problem where passwords
+        # were not being hashed if they already looked like a hash. This
+        # would allow someone to hash their password ahead of time
+        # (potentially getting around password requirements, like
+        # length) and then they could auth with their original password.
+        password = uuid.uuid4().hex
+        hashed_password = common_utils.hash_password(password)
+        new_hashed_password = common_utils.hash_password(hashed_password)
+        self.assertFalse(common_utils.check_password(password,
+                                                     new_hashed_password))
+
+    def test_verify_long_password_strict(self):
+        self.config_fixture.config(strict_password_check=False)
+        self.config_fixture.config(group='identity', max_password_length=5)
+        max_length = CONF.identity.max_password_length
+        invalid_password = 'passw0rd'
+        trunc = common_utils.verify_length_and_trunc_password(invalid_password)
+        self.assertEqual(invalid_password[:max_length], trunc)
+
+    def test_verify_long_password_strict_raises_exception(self):
+        self.config_fixture.config(strict_password_check=True)
+        self.config_fixture.config(group='identity', max_password_length=5)
+        invalid_password = 'passw0rd'
+        self.assertRaises(exception.PasswordVerificationError,
+                          common_utils.verify_length_and_trunc_password,
+                          invalid_password)
+
+    def test_hash_long_password_truncation(self):
+        self.config_fixture.config(strict_password_check=False)
+        invalid_length_password = '0' * 9999999
+        hashed = common_utils.hash_password(invalid_length_password)
+        self.assertTrue(common_utils.check_password(invalid_length_password,
+                                                    hashed))
+
+    def test_hash_long_password_strict(self):
+        self.config_fixture.config(strict_password_check=True)
+        invalid_length_password = '0' * 9999999
+        self.assertRaises(exception.PasswordVerificationError,
+                          common_utils.hash_password,
+                          invalid_length_password)
+
+    def _create_test_user(self, password=OPTIONAL):
+        user = {"name": "hthtest"}
+        if password is not self.OPTIONAL:
+            user['password'] = password
+
+        return user
+
+    def test_hash_user_password_without_password(self):
+        user = self._create_test_user()
+        hashed = common_utils.hash_user_password(user)
+        self.assertEqual(user, hashed)
+
+    def test_hash_user_password_with_null_password(self):
+        user = self._create_test_user(password=None)
+        hashed = common_utils.hash_user_password(user)
+        self.assertEqual(user, hashed)
+
+    def test_hash_user_password_with_empty_password(self):
+        password = ''
+        user = self._create_test_user(password=password)
+        user_hashed = common_utils.hash_user_password(user)
+        password_hashed = user_hashed['password']
+        self.assertTrue(common_utils.check_password(password, password_hashed))
+
+    def test_hash_edge_cases(self):
+        hashed = common_utils.hash_password('secret')
+        self.assertFalse(common_utils.check_password('', hashed))
+        self.assertFalse(common_utils.check_password(None, hashed))
+
+    def test_hash_unicode(self):
+        password = u'Comment \xe7a va'
+        wrong = 'Comment ?a va'
+        hashed = common_utils.hash_password(password)
+        self.assertTrue(common_utils.check_password(password, hashed))
+        self.assertFalse(common_utils.check_password(wrong, hashed))
+
+    def test_auth_str_equal(self):
+        self.assertTrue(common_utils.auth_str_equal('abc123', 'abc123'))
+        self.assertFalse(common_utils.auth_str_equal('a', 'aaaaa'))
+        self.assertFalse(common_utils.auth_str_equal('aaaaa', 'a'))
+        self.assertFalse(common_utils.auth_str_equal('ABC123', 'abc123'))
+
+    def test_unixtime(self):
+        global TZ
+
+        @utils.timezone
+        def _test_unixtime():
+            epoch = common_utils.unixtime(dt)
+            self.assertEqual(epoch, epoch_ans, "TZ=%s" % TZ)
+
+        dt = datetime.datetime(1970, 1, 2, 3, 4, 56, 0)
+        epoch_ans = 56 + 4 * 60 + 3 * 3600 + 86400
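+        # i.e. 1 day + 3 h + 4 min + 56 s after the epoch:
+        # 1970-01-02T03:04:56 UTC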
+        for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']:
+            TZ = 'UTC' + d
+            _test_unixtime()
+
+    def test_pki_encoder(self):
+        data = {'field': 'value'}
+        json = jsonutils.dumps(data, cls=common_utils.PKIEncoder)
+        expected_json = b'{"field":"value"}'
+        self.assertEqual(expected_json, json)
+
+
+class ServiceHelperTests(tests.BaseTestCase):
+
+    @service.fail_gracefully
+    def _do_test(self):
+        raise Exception("Test Exc")
+
+    def test_fail_gracefully(self):
+        self.assertRaises(tests.UnexpectedExit, self._do_test)
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_db2.conf b/keystone-moon/keystone/tests/unit/config_files/backend_db2.conf
new file mode 100644 (file)
index 0000000..2bd0c1a
--- /dev/null
@@ -0,0 +1,4 @@
+# Used for running the Migrate tests against a live DB2 server.
+# See _sql_livetest.py.
+[database]
+connection = ibm_db_sa://keystone:keystone@/staktest?charset=utf8
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_ldap.conf b/keystone-moon/keystone/tests/unit/config_files/backend_ldap.conf
new file mode 100644 (file)
index 0000000..3216118
--- /dev/null
@@ -0,0 +1,5 @@
+[ldap]
+url = fake://memory
+user = cn=Admin
+password = password
+suffix = cn=example,cn=com
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_ldap_pool.conf b/keystone-moon/keystone/tests/unit/config_files/backend_ldap_pool.conf
new file mode 100644 (file)
index 0000000..36fa1ac
--- /dev/null
@@ -0,0 +1,41 @@
+[ldap]
+url = fakepool://memory
+user = cn=Admin
+password = password
+backend_entities = ['Tenant', 'User', 'UserRoleAssociation', 'Role', 'Group', 'Domain']
+suffix = cn=example,cn=com
+
+# Connection pooling specific attributes
+
+# Enable LDAP connection pooling. (boolean value)
+use_pool=true
+
+# Connection pool size. (integer value)
+pool_size=5
+
+# Maximum count of reconnect trials. (integer value)
+pool_retry_max=2
+
+# Time span in seconds to wait between two reconnect trials.
+# (floating point value)
+pool_retry_delay=0.2
+
+# Connector timeout in seconds. Value -1 indicates indefinite
+# wait for response. (integer value)
+pool_connection_timeout=-1
+
+# Connection lifetime in seconds.
+# (integer value)
+pool_connection_lifetime=600
+
+# Enable LDAP connection pooling for end user authentication.
+# If use_pool is disabled, then this setting is meaningless
+# and is not used at all. (boolean value)
+use_auth_pool=true
+
+# End user auth connection pool size. (integer value)
+auth_pool_size=50
+
+# End user auth connection lifetime in seconds. (integer
+# value)
+auth_pool_connection_lifetime=60
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf
new file mode 100644 (file)
index 0000000..8a06f2f
--- /dev/null
@@ -0,0 +1,14 @@
+[database]
+# For a file-based sqlite database at a specific location use:
+#connection = sqlite:////tmp/keystone.db
+# To test MySQL:
+#connection = mysql://keystone:keystone@localhost/keystone?charset=utf8
+# To test PostgreSQL:
+#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
+idle_timeout = 200
+
+[ldap]
+url = fake://memory
+user = cn=Admin
+password = password
+suffix = cn=example,cn=com
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_liveldap.conf b/keystone-moon/keystone/tests/unit/config_files/backend_liveldap.conf
new file mode 100644 (file)
index 0000000..59cb857
--- /dev/null
@@ -0,0 +1,14 @@
+[ldap]
+url = ldap://localhost
+user = cn=Manager,dc=openstack,dc=org
+password = test
+suffix = dc=openstack,dc=org
+group_tree_dn = ou=UserGroups,dc=openstack,dc=org
+role_tree_dn = ou=Roles,dc=openstack,dc=org
+project_tree_dn = ou=Projects,dc=openstack,dc=org
+user_tree_dn = ou=Users,dc=openstack,dc=org
+project_enabled_emulation = True
+user_enabled_emulation = True
+user_mail_attribute = mail
+use_dumb_member = True
+
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf
new file mode 100644 (file)
index 0000000..2d04d83
--- /dev/null
@@ -0,0 +1,9 @@
+[database]
+connection = sqlite://
+# For a file-based sqlite database use:
+#connection = sqlite:////tmp/keystone.db
+# To test MySQL:
+#connection = mysql://keystone:keystone@localhost/keystone?charset=utf8
+# To test PostgreSQL:
+#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
+idle_timeout = 200
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf
new file mode 100644 (file)
index 0000000..d612f72
--- /dev/null
@@ -0,0 +1,4 @@
+# Used for running the Migrate tests against a live MySQL server.
+# See _sql_livetest.py.
+[database]
+connection = mysql://keystone:keystone@localhost/keystone_test?charset=utf8
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_pool_liveldap.conf b/keystone-moon/keystone/tests/unit/config_files/backend_pool_liveldap.conf
new file mode 100644 (file)
index 0000000..a85f522
--- /dev/null
@@ -0,0 +1,35 @@
+[ldap]
+url = ldap://localhost
+user = cn=Manager,dc=openstack,dc=org
+password = test
+suffix = dc=openstack,dc=org
+group_tree_dn = ou=UserGroups,dc=openstack,dc=org
+role_tree_dn = ou=Roles,dc=openstack,dc=org
+project_tree_dn = ou=Projects,dc=openstack,dc=org
+user_tree_dn = ou=Users,dc=openstack,dc=org
+project_enabled_emulation = True
+user_enabled_emulation = True
+user_mail_attribute = mail
+use_dumb_member = True
+
+# Connection pooling specific attributes
+
+# Enable LDAP connection pooling. (boolean value)
+use_pool=true
+# Connection pool size. (integer value)
+pool_size=5
+# Connection lifetime in seconds.
+# (integer value)
+pool_connection_lifetime=60
+
+# Enable LDAP connection pooling for end user authentication.
+# If use_pool is disabled, then this setting is meaningless
+# and is not used at all. (boolean value)
+use_auth_pool=true
+
+# End user auth connection pool size. (integer value)
+auth_pool_size=50
+
+# End user auth connection lifetime in seconds. (integer
+# value)
+auth_pool_connection_lifetime=300
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_postgresql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_postgresql.conf
new file mode 100644 (file)
index 0000000..001805d
--- /dev/null
@@ -0,0 +1,4 @@
+# Used for running the Migrate tests against a live PostgreSQL server.
+# See _sql_livetest.py.
+[database]
+connection = postgresql://keystone:keystone@localhost/keystone_test?client_encoding=utf8
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf
new file mode 100644 (file)
index 0000000..9d401af
--- /dev/null
@@ -0,0 +1,8 @@
+[database]
+# For a file-based sqlite database at a specific location use:
+#connection = sqlite:////tmp/keystone.db
+# To test MySQL:
+#connection = mysql://keystone:keystone@localhost/keystone?charset=utf8
+# To test PostgreSQL:
+#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
+idle_timeout = 200
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_tls_liveldap.conf b/keystone-moon/keystone/tests/unit/config_files/backend_tls_liveldap.conf
new file mode 100644 (file)
index 0000000..d35b913
--- /dev/null
@@ -0,0 +1,17 @@
+[ldap]
+url = ldap://
+user = dc=Manager,dc=openstack,dc=org
+password = test
+suffix = dc=openstack,dc=org
+group_tree_dn = ou=UserGroups,dc=openstack,dc=org
+role_tree_dn = ou=Roles,dc=openstack,dc=org
+project_tree_dn = ou=Projects,dc=openstack,dc=org
+user_tree_dn = ou=Users,dc=openstack,dc=org
+project_enabled_emulation = True
+user_enabled_emulation = True
+user_mail_attribute = mail
+use_dumb_member = True
+use_tls = True
+tls_cacertfile = /etc/keystone/ssl/certs/cacert.pem
+tls_cacertdir = /etc/keystone/ssl/certs/
+tls_req_cert = demand
diff --git a/keystone-moon/keystone/tests/unit/config_files/deprecated.conf b/keystone-moon/keystone/tests/unit/config_files/deprecated.conf
new file mode 100644 (file)
index 0000000..515e663
--- /dev/null
@@ -0,0 +1,8 @@
+# Options in this file are deprecated. See test_config.
+
+[sql]
+# These options were deprecated in Icehouse with the switch to oslo's
+# db.sqlalchemy.
+
+connection = sqlite://deprecated
+idle_timeout = 54321
diff --git a/keystone-moon/keystone/tests/unit/config_files/deprecated_override.conf b/keystone-moon/keystone/tests/unit/config_files/deprecated_override.conf
new file mode 100644 (file)
index 0000000..1d1c926
--- /dev/null
@@ -0,0 +1,15 @@
+# Options in this file are deprecated. See test_config.
+
+[sql]
+# These options were deprecated in Icehouse with the switch to oslo's
+# db.sqlalchemy.
+
+connection = sqlite://deprecated
+idle_timeout = 54321
+
+
+[database]
+# These are the new options from the [sql] section.
+
+connection = sqlite://new
+idle_timeout = 65432
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf
new file mode 100644 (file)
index 0000000..a4492a6
--- /dev/null
@@ -0,0 +1,5 @@
+# The domain-specific configuration file for the test domain
+# 'domain1' for use with unit tests.
+
+[identity]
+driver = keystone.identity.backends.sql.Identity
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf
new file mode 100644 (file)
index 0000000..7049afe
--- /dev/null
@@ -0,0 +1,14 @@
+# The domain-specific configuration file for the default domain for
+# use with unit tests.
+#
+# The domain_name of the default domain is 'Default', hence the
+# strange mix of upper/lower case in the file name.
+
+[ldap]
+url = fake://memory
+user = cn=Admin
+password = password
+suffix = cn=example,cn=com
+
+[identity]
+driver = keystone.identity.backends.ldap.Identity
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf
new file mode 100644 (file)
index 0000000..6b7e248
--- /dev/null
@@ -0,0 +1,11 @@
+# The domain-specific configuration file for the test domain
+# 'domain1' for use with unit tests.
+
+[ldap]
+url = fake://memory1
+user = cn=Admin
+password = password
+suffix = cn=example,cn=com
+
+[identity]
+driver = keystone.identity.backends.ldap.Identity
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf
new file mode 100644 (file)
index 0000000..0ed68eb
--- /dev/null
@@ -0,0 +1,13 @@
+# The domain-specific configuration file for the test domain
+# 'domain2' for use with unit tests.
+
+[ldap]
+url = fake://memory
+user = cn=Admin
+password = password
+suffix = cn=myroot,cn=com
+group_tree_dn = ou=UserGroups,dc=myroot,dc=org
+user_tree_dn = ou=Users,dc=myroot,dc=org
+
+[identity]
+driver = keystone.identity.backends.ldap.Identity
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf
new file mode 100644 (file)
index 0000000..81b4446
--- /dev/null
@@ -0,0 +1,5 @@
+# The domain-specific configuration file for the test domain
+# 'domain2' for use with unit tests.
+
+[identity]
+driver = keystone.identity.backends.sql.Identity
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf
new file mode 100644 (file)
index 0000000..7049afe
--- /dev/null
@@ -0,0 +1,14 @@
+# The domain-specific configuration file for the default domain for
+# use with unit tests.
+#
+# The domain_name of the default domain is 'Default', hence the
+# strange mix of upper/lower case in the file name.
+
+[ldap]
+url = fake://memory
+user = cn=Admin
+password = password
+suffix = cn=example,cn=com
+
+[identity]
+driver = keystone.identity.backends.ldap.Identity
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf
new file mode 100644 (file)
index 0000000..a4492a6
--- /dev/null
@@ -0,0 +1,5 @@
+# The domain-specific configuration file for the test domain
+# 'domain1' for use with unit tests.
+
+[identity]
+driver = keystone.identity.backends.sql.Identity
\ No newline at end of file
diff --git a/keystone-moon/keystone/tests/unit/config_files/test_auth_plugin.conf b/keystone-moon/keystone/tests/unit/config_files/test_auth_plugin.conf
new file mode 100644 (file)
index 0000000..abcc43b
--- /dev/null
@@ -0,0 +1,7 @@
+[auth]
+methods = external,password,token,simple_challenge_response,saml2,openid,x509
+simple_challenge_response = keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse
+saml2 = keystone.auth.plugins.mapped.Mapped
+openid = keystone.auth.plugins.mapped.Mapped
+x509 = keystone.auth.plugins.mapped.Mapped
+
diff --git a/keystone-moon/keystone/tests/unit/core.py b/keystone-moon/keystone/tests/unit/core.py
new file mode 100644 (file)
index 0000000..caca7db
--- /dev/null
@@ -0,0 +1,660 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+import atexit
+import functools
+import logging
+import os
+import re
+import shutil
+import socket
+import sys
+import warnings
+
+import fixtures
+from oslo_config import cfg
+from oslo_config import fixture as config_fixture
+from oslo_log import log
+import oslotest.base as oslotest
+from oslotest import mockpatch
+import six
+from sqlalchemy import exc
+from testtools import testcase
+import webob
+
+# NOTE(ayoung)
+# environment.use_eventlet must run before any of the code that will
+# call the eventlet monkeypatching.
+from keystone.common import environment  # noqa
+environment.use_eventlet()
+
+from keystone import auth
+from keystone.common import config as common_cfg
+from keystone.common import dependency
+from keystone.common import kvs
+from keystone.common.kvs import core as kvs_core
+from keystone import config
+from keystone import controllers
+from keystone import exception
+from keystone import notifications
+from keystone.policy.backends import rules
+from keystone.server import common
+from keystone import service
+from keystone.tests.unit import ksfixtures
+
+
+config.configure()
+
+LOG = log.getLogger(__name__)
+PID = six.text_type(os.getpid())
+TESTSDIR = os.path.dirname(os.path.abspath(__file__))
+TESTCONF = os.path.join(TESTSDIR, 'config_files')
+ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..'))
+VENDOR = os.path.join(ROOTDIR, 'vendor')
+ETCDIR = os.path.join(ROOTDIR, 'etc')
+
+
+def _calc_tmpdir():
+    env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR')
+    if not env_val:
+        return os.path.join(TESTSDIR, 'tmp', PID)
+    return os.path.join(env_val, PID)
+
+
+TMPDIR = _calc_tmpdir()
+
+CONF = cfg.CONF
+log.register_options(CONF)
+rules.init()
+
+IN_MEM_DB_CONN_STRING = 'sqlite://'
+
+exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
+os.makedirs(TMPDIR)
+atexit.register(shutil.rmtree, TMPDIR)
+
+
+class dirs(object):
+    @staticmethod
+    def root(*p):
+        return os.path.join(ROOTDIR, *p)
+
+    @staticmethod
+    def etc(*p):
+        return os.path.join(ETCDIR, *p)
+
+    @staticmethod
+    def tests(*p):
+        return os.path.join(TESTSDIR, *p)
+
+    @staticmethod
+    def tmp(*p):
+        return os.path.join(TMPDIR, *p)
+
+    @staticmethod
+    def tests_conf(*p):
+        return os.path.join(TESTCONF, *p)
+
+
+# keystone.common.sql.initialize() for testing.
+DEFAULT_TEST_DB_FILE = dirs.tmp('test.db')
+
+
+@atexit.register
+def remove_test_databases():
+    db = dirs.tmp('test.db')
+    if os.path.exists(db):
+        os.unlink(db)
+    pristine = dirs.tmp('test.db.pristine')
+    if os.path.exists(pristine):
+        os.unlink(pristine)
+
+
+def generate_paste_config(extension_name):
+    # Generate a file, based on keystone-paste.ini, that is named:
+    # extension_name.ini, and includes extension_name in the pipeline
+    with open(dirs.etc('keystone-paste.ini'), 'r') as f:
+        contents = f.read()
+
+    new_contents = contents.replace(' service_v3',
+                                    ' %s service_v3' % (extension_name))
+
+    new_paste_file = dirs.tmp(extension_name + '.ini')
+    with open(new_paste_file, 'w') as f:
+        f.write(new_contents)
+
+    return new_paste_file
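+
+# For example (a sketch; 'endpoint_filter' is just an illustrative
+# extension name): generate_paste_config('endpoint_filter') writes
+# <tmpdir>/endpoint_filter.ini, a copy of keystone-paste.ini with
+# ' service_v3' in the pipeline rewritten to
+# ' endpoint_filter service_v3', so the extension joins the v3 pipeline.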
+
+
+def remove_generated_paste_config(extension_name):
+    # Remove the generated paste config file, named extension_name.ini
+    paste_file_to_remove = dirs.tmp(extension_name + '.ini')
+    os.remove(paste_file_to_remove)
+
+
+def skip_if_cache_disabled(*sections):
+    """This decorator is used to skip a test if caching is disabled either
+    globally or for the specific section.
+
+    In the code fragment::
+
+        @skip_if_cache_disabled('assignment', 'token')
+        def test_method(*args):
+            ...
+
+    The method test_method would be skipped if caching is disabled globally
+    via the `enabled` option in the `cache` section of the configuration, or
+    if the `caching` option is set to false in either the `assignment` or
+    `token` section of the configuration.  This decorator can be used with
+    no arguments to check only global caching.
+
+    If a specified configuration section does not define the `caching` option,
+    this decorator makes the same assumption as the `should_cache_fn` in
+    keystone.common.cache that caching should be enabled.
+    """
+    def wrapper(f):
+        @functools.wraps(f)
+        def inner(*args, **kwargs):
+            if not CONF.cache.enabled:
+                raise testcase.TestSkipped('Cache globally disabled.')
+            for s in sections:
+                conf_sec = getattr(CONF, s, None)
+                if conf_sec is not None:
+                    if not getattr(conf_sec, 'caching', True):
+                        raise testcase.TestSkipped('%s caching disabled.' % s)
+            return f(*args, **kwargs)
+        return inner
+    return wrapper
+
+
+def skip_if_no_multiple_domains_support(f):
+    """This decorator is used to skip a test if an identity driver
+    does not support multiple domains.
+    """
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        test_obj = args[0]
+        if not test_obj.identity_api.multiple_domains_supported:
+            raise testcase.TestSkipped('No multiple domains support')
+        return f(*args, **kwargs)
+    return wrapper
+
+
+class UnexpectedExit(Exception):
+    pass
+
+
+class BadLog(Exception):
+    """Raised on invalid call to logging (parameter mismatch)."""
+    pass
+
+
+class TestClient(object):
+    def __init__(self, app=None, token=None):
+        self.app = app
+        self.token = token
+
+    def request(self, method, path, headers=None, body=None):
+        if headers is None:
+            headers = {}
+
+        if self.token:
+            headers.setdefault('X-Auth-Token', self.token)
+
+        req = webob.Request.blank(path)
+        req.method = method
+        for k, v in six.iteritems(headers):
+            req.headers[k] = v
+        if body:
+            req.body = body
+        return req.get_response(self.app)
+
+    def get(self, path, headers=None):
+        return self.request('GET', path=path, headers=headers)
+
+    def post(self, path, headers=None, body=None):
+        return self.request('POST', path=path, headers=headers, body=body)
+
+    def put(self, path, headers=None, body=None):
+        return self.request('PUT', path=path, headers=headers, body=body)
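+
+# Typical use (a sketch; the token value and request path are made up):
+#
+#     client = TestClient(app=self.loadapp('keystone'), token='ADMIN')
+#     resp = client.get('/v3/projects')
+#     self.assertEqual(200, resp.status_int)
+#
+# The token, when given, is sent as the X-Auth-Token header on every
+# request unless the caller overrides it.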
+
+
+class BaseTestCase(oslotest.BaseTestCase):
+    """Light weight base test class.
+
+    This is a placeholder that will eventually go away once the
+    setup/teardown in TestCase is properly trimmed down to the bare
+    essentials. This is really just a ploy to speed up the tests by
+    eliminating unnecessary work.
+    """
+
+    def setUp(self):
+        super(BaseTestCase, self).setUp()
+        self.useFixture(mockpatch.PatchObject(sys, 'exit',
+                                              side_effect=UnexpectedExit))
+
+    def cleanup_instance(self, *names):
+        """Create a function suitable for use with self.addCleanup.
+
+        :returns: a callable that uses a closure to delete instance attributes
+
+        """
+        def cleanup():
+            for name in names:
+                # TODO(dstanek): remove this 'if' statement once
+                # load_backend in test_backend_ldap is only called once
+                # per test
+                if hasattr(self, name):
+                    delattr(self, name)
+        return cleanup
+
+
+@dependency.requires('revoke_api')
+class TestCase(BaseTestCase):
+
+    def config_files(self):
+        return []
+
+    def config_overrides(self):
+        signing_certfile = 'examples/pki/certs/signing_cert.pem'
+        signing_keyfile = 'examples/pki/private/signing_key.pem'
+        self.config_fixture.config(group='oslo_policy',
+                                   policy_file=dirs.etc('policy.json'))
+        self.config_fixture.config(
+            # TODO(morganfainberg): Make Cache Testing a separate test case
+            # in tempest, and move it out of the base unit tests.
+            group='cache',
+            backend='dogpile.cache.memory',
+            enabled=True,
+            proxies=['keystone.tests.unit.test_cache.CacheIsolatingProxy'])
+        self.config_fixture.config(
+            group='catalog',
+            driver='keystone.catalog.backends.templated.Catalog',
+            template_file=dirs.tests('default_catalog.templates'))
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.sql.Identity')
+        self.config_fixture.config(
+            group='kvs',
+            backends=[
+                ('keystone.tests.unit.test_kvs.'
+                 'KVSBackendForcedKeyMangleFixture'),
+                'keystone.tests.unit.test_kvs.KVSBackendFixture'])
+        self.config_fixture.config(
+            group='revoke',
+            driver='keystone.contrib.revoke.backends.kvs.Revoke')
+        self.config_fixture.config(
+            group='signing', certfile=signing_certfile,
+            keyfile=signing_keyfile,
+            ca_certs='examples/pki/certs/cacert.pem')
+        self.config_fixture.config(
+            group='token',
+            driver='keystone.token.persistence.backends.kvs.Token')
+        self.config_fixture.config(
+            group='trust',
+            driver='keystone.trust.backends.sql.Trust')
+        self.config_fixture.config(
+            group='saml', certfile=signing_certfile, keyfile=signing_keyfile)
+        self.config_fixture.config(
+            default_log_levels=[
+                'amqp=WARN',
+                'amqplib=WARN',
+                'boto=WARN',
+                'qpid=WARN',
+                'sqlalchemy=WARN',
+                'suds=INFO',
+                'oslo.messaging=INFO',
+                'iso8601=WARN',
+                'requests.packages.urllib3.connectionpool=WARN',
+                'routes.middleware=INFO',
+                'stevedore.extension=INFO',
+                'keystone.notifications=INFO',
+                'keystone.common._memcache_pool=INFO',
+                'keystone.common.ldap=INFO',
+            ])
+        self.auth_plugin_config_override()
+
+    def auth_plugin_config_override(self, methods=None, **method_classes):
+        if methods is None:
+            methods = ['external', 'password', 'token', ]
+            if not method_classes:
+                method_classes = dict(
+                    external='keystone.auth.plugins.external.DefaultDomain',
+                    password='keystone.auth.plugins.password.Password',
+                    token='keystone.auth.plugins.token.Token',
+                )
+        self.config_fixture.config(group='auth', methods=methods)
+        common_cfg.setup_authentication()
+        if method_classes:
+            self.config_fixture.config(group='auth', **method_classes)
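+
+    # For instance (a sketch), a test that only needs password auth with a
+    # custom plugin class could call:
+    #
+    #     self.auth_plugin_config_override(
+    #         methods=['password'],
+    #         password='keystone.tests.unit.test_auth_plugin.CustomPassword')
+    #
+    # CustomPassword is hypothetical here; any importable plugin path works.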
+
+    def setUp(self):
+        super(TestCase, self).setUp()
+        self.addCleanup(self.cleanup_instance('config_fixture', 'logger'))
+
+        self.addCleanup(CONF.reset)
+
+        self.useFixture(mockpatch.PatchObject(logging.Handler, 'handleError',
+                                              side_effect=BadLog))
+        self.config_fixture = self.useFixture(config_fixture.Config(CONF))
+        self.config(self.config_files())
+
+        # NOTE(morganfainberg): mock the auth plugin setup to use the config
+        # fixture which automatically unregisters options when performing
+        # cleanup.
+        def mocked_register_auth_plugin_opt(conf, opt):
+            self.config_fixture.register_opt(opt, group='auth')
+        self.register_auth_plugin_opt_patch = self.useFixture(
+            mockpatch.PatchObject(common_cfg, '_register_auth_plugin_opt',
+                                  new=mocked_register_auth_plugin_opt))
+
+        self.config_overrides()
+
+        self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
+
+        # NOTE(morganfainberg): This code is a copy from the oslo-incubator
+        # log module. This is not in a function or otherwise available to use
+        # without having a CONF object to setup logging. This should help to
+        # reduce the log size by limiting what we log (similar to how Keystone
+        # would run under mod_wsgi or eventlet).
+        for pair in CONF.default_log_levels:
+            mod, _sep, level_name = pair.partition('=')
+            logger = logging.getLogger(mod)
+            logger.setLevel(level_name)
+
+        warnings.filterwarnings('error', category=DeprecationWarning,
+                                module='^keystone\\.')
+        warnings.simplefilter('error', exc.SAWarning)
+        self.addCleanup(warnings.resetwarnings)
+
+        self.useFixture(ksfixtures.Cache())
+
+        # Clear the registry of providers so that providers from previous
+        # tests aren't used.
+        self.addCleanup(dependency.reset)
+
+        self.addCleanup(kvs.INMEMDB.clear)
+
+        # Ensure Notification subscriptions and resource types are empty
+        self.addCleanup(notifications.clear_subscribers)
+        self.addCleanup(notifications.reset_notifier)
+
+        # Reset the auth-plugin registry
+        self.addCleanup(self.clear_auth_plugin_registry)
+
+        self.addCleanup(setattr, controllers, '_VERSIONS', [])
+
+    def config(self, config_files):
+        CONF(args=[], project='keystone', default_config_files=config_files)
+
+    def load_backends(self):
+        """Initializes each manager and assigns them to an attribute."""
+
+        # TODO(blk-u): Shouldn't need to clear the registry here, but some
+        # tests call load_backends multiple times. These should be fixed to
+        # only call load_backends once.
+        dependency.reset()
+
+        # TODO(morganfainberg): Shouldn't need to clear the registry here, but
+        # some tests call load_backends multiple times.  Since it is not
+        # possible to re-configure a backend, we need to clear the list.  This
+        # should eventually be removed once testing has been cleaned up.
+        kvs_core.KEY_VALUE_STORE_REGISTRY.clear()
+
+        self.clear_auth_plugin_registry()
+        drivers, _unused = common.setup_backends(
+            load_extra_backends_fn=self.load_extra_backends)
+
+        for manager_name, manager in six.iteritems(drivers):
+            setattr(self, manager_name, manager)
+        self.addCleanup(self.cleanup_instance(*drivers.keys()))
+
+    def load_extra_backends(self):
+        """Override to load managers that aren't loaded by default.
+
+        This is useful to load managers initialized by extensions. No extra
+        backends are loaded by default.
+
+        :return: dict of name -> manager
+        """
+        return {}
+
+    def load_fixtures(self, fixtures):
+        """Hacky basic and naive fixture loading based on a python module.
+
+        Expects that the various APIs into the various services are already
+        defined on `self`.
+
+        """
+        # NOTE(dstanek): create a list of attribute names to be removed
+        # from this instance during cleanup
+        fixtures_to_cleanup = []
+
+        # TODO(termie): doing something from json, probably based on Django's
+        #               loaddata will be much preferred.
+        if (hasattr(self, 'identity_api') and
+            hasattr(self, 'assignment_api') and
+                hasattr(self, 'resource_api')):
+            for domain in fixtures.DOMAINS:
+                try:
+                    rv = self.resource_api.create_domain(domain['id'], domain)
+                except exception.Conflict:
+                    rv = self.resource_api.get_domain(domain['id'])
+                except exception.NotImplemented:
+                    rv = domain
+                attrname = 'domain_%s' % domain['id']
+                setattr(self, attrname, rv)
+                fixtures_to_cleanup.append(attrname)
+
+            for tenant in fixtures.TENANTS:
+                if hasattr(self, 'tenant_%s' % tenant['id']):
+                    try:
+                        # This will clear out any roles on the project as well
+                        self.resource_api.delete_project(tenant['id'])
+                    except exception.ProjectNotFound:
+                        pass
+                rv = self.resource_api.create_project(
+                    tenant['id'], tenant)
+
+                attrname = 'tenant_%s' % tenant['id']
+                setattr(self, attrname, rv)
+                fixtures_to_cleanup.append(attrname)
+
+            for role in fixtures.ROLES:
+                try:
+                    rv = self.role_api.create_role(role['id'], role)
+                except exception.Conflict:
+                    rv = self.role_api.get_role(role['id'])
+                attrname = 'role_%s' % role['id']
+                setattr(self, attrname, rv)
+                fixtures_to_cleanup.append(attrname)
+
+            for user in fixtures.USERS:
+                user_copy = user.copy()
+                tenants = user_copy.pop('tenants')
+                try:
+                    existing_user = getattr(self, 'user_%s' % user['id'], None)
+                    if existing_user is not None:
+                        self.identity_api.delete_user(existing_user['id'])
+                except exception.UserNotFound:
+                    pass
+
+                # For users, the manager layer will generate the ID
+                user_copy = self.identity_api.create_user(user_copy)
+                # Our tests expect that the password is still in the user
+                # record so that they can reference it, so put it back into
+                # the dict returned.
+                user_copy['password'] = user['password']
+
+                for tenant_id in tenants:
+                    try:
+                        self.assignment_api.add_user_to_project(
+                            tenant_id, user_copy['id'])
+                    except exception.Conflict:
+                        pass
+                # Use the ID from the fixture as the attribute name, so
+                # that our tests can easily reference each user dict, while
+                # the ID in the dict will be the real public ID.
+                attrname = 'user_%s' % user['id']
+                setattr(self, attrname, user_copy)
+                fixtures_to_cleanup.append(attrname)
+
+            self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
+
+    def _paste_config(self, config):
+        if not config.startswith('config:'):
+            test_path = os.path.join(TESTSDIR, config)
+            etc_path = os.path.join(ROOTDIR, 'etc', config)
+            for path in [test_path, etc_path]:
+                if os.path.exists('%s-paste.ini' % path):
+                    return 'config:%s-paste.ini' % path
+        return config
+
+    def loadapp(self, config, name='main'):
+        return service.loadapp(self._paste_config(config), name=name)
+
+    def clear_auth_plugin_registry(self):
+        auth.controllers.AUTH_METHODS.clear()
+        auth.controllers.AUTH_PLUGINS_LOADED = False
+
+    def assertCloseEnoughForGovernmentWork(self, a, b, delta=3):
+        """Asserts that two datetimes are nearly equal within a small delta.
+
+        :param delta: Maximum allowable time delta, defined in seconds.
+        """
+        msg = '%s != %s within %s delta' % (a, b, delta)
+
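+        # NOTE: timedelta.seconds ignores the days component, so this
+        # comparison assumes a and b are less than a day apart.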
+        self.assertTrue(abs(a - b).seconds <= delta, msg)
+
+    def assertNotEmpty(self, iterable):
+        self.assertTrue(len(iterable))
+
+    def assertDictEqual(self, d1, d2, msg=None):
+        self.assertIsInstance(d1, dict)
+        self.assertIsInstance(d2, dict)
+        self.assertEqual(d1, d2, msg)
+
+    def assertRaisesRegexp(self, expected_exception, expected_regexp,
+                           callable_obj, *args, **kwargs):
+        """Asserts that the message in a raised exception matches a regexp.
+        """
+        try:
+            callable_obj(*args, **kwargs)
+        except expected_exception as exc_value:
+            if isinstance(expected_regexp, six.string_types):
+                expected_regexp = re.compile(expected_regexp)
+
+            if isinstance(exc_value.args[0], six.text_type):
+                if not expected_regexp.search(six.text_type(exc_value)):
+                    raise self.failureException(
+                        '"%s" does not match "%s"' %
+                        (expected_regexp.pattern, six.text_type(exc_value)))
+            else:
+                if not expected_regexp.search(str(exc_value)):
+                    raise self.failureException(
+                        '"%s" does not match "%s"' %
+                        (expected_regexp.pattern, str(exc_value)))
+        else:
+            if hasattr(expected_exception, '__name__'):
+                excName = expected_exception.__name__
+            else:
+                excName = str(expected_exception)
+            raise self.failureException("%s not raised" % excName)
+
+    def assertDictContainsSubset(self, expected, actual, msg=None):
+        """Checks whether actual is a superset of expected."""
+
+        def safe_repr(obj, short=False):
+            _MAX_LENGTH = 80
+            try:
+                result = repr(obj)
+            except Exception:
+                result = object.__repr__(obj)
+            if not short or len(result) < _MAX_LENGTH:
+                return result
+            return result[:_MAX_LENGTH] + ' [truncated]...'
+
+        missing = []
+        mismatched = []
+        for key, value in six.iteritems(expected):
+            if key not in actual:
+                missing.append(key)
+            elif value != actual[key]:
+                mismatched.append('%s, expected: %s, actual: %s' %
+                                  (safe_repr(key), safe_repr(value),
+                                   safe_repr(actual[key])))
+
+        if not (missing or mismatched):
+            return
+
+        standardMsg = ''
+        if missing:
+            standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
+                                                   missing)
+        if mismatched:
+            if standardMsg:
+                standardMsg += '; '
+            standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
+
+        self.fail(self._formatMessage(msg, standardMsg))
+
+    @property
+    def ipv6_enabled(self):
+        if socket.has_ipv6:
+            sock = None
+            try:
+                sock = socket.socket(socket.AF_INET6)
+                # NOTE(Mouad): Try to bind to IPv6 loopback ip address.
+                sock.bind(("::1", 0))
+                return True
+            except socket.error:
+                pass
+            finally:
+                if sock:
+                    sock.close()
+        return False
+
+    def skip_if_no_ipv6(self):
+        if not self.ipv6_enabled:
+            raise self.skipTest("IPv6 is not enabled in the system")
+
+    def skip_if_env_not_set(self, env_var):
+        if not os.environ.get(env_var):
+            self.skipTest('Env variable %s is not set.' % env_var)
+
+
+class SQLDriverOverrides(object):
+    """A mixin for consolidating sql-specific test overrides."""
+    def config_overrides(self):
+        super(SQLDriverOverrides, self).config_overrides()
+        # SQL specific driver overrides
+        self.config_fixture.config(
+            group='catalog',
+            driver='keystone.catalog.backends.sql.Catalog')
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.sql.Identity')
+        self.config_fixture.config(
+            group='policy',
+            driver='keystone.policy.backends.sql.Policy')
+        self.config_fixture.config(
+            group='revoke',
+            driver='keystone.contrib.revoke.backends.sql.Revoke')
+        self.config_fixture.config(
+            group='token',
+            driver='keystone.token.persistence.backends.sql.Token')
+        self.config_fixture.config(
+            group='trust',
+            driver='keystone.trust.backends.sql.Trust')
diff --git a/keystone-moon/keystone/tests/unit/default_catalog.templates b/keystone-moon/keystone/tests/unit/default_catalog.templates
new file mode 100644 (file)
index 0000000..faf87eb
--- /dev/null
@@ -0,0 +1,14 @@
+# config for templated.Catalog, using camelCase because I don't want to do
+# translations for keystone compat
+catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
+catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
+catalog.RegionOne.identity.internalURL = http://localhost:$(admin_port)s/v2.0
+catalog.RegionOne.identity.name = 'Identity Service'
+catalog.RegionOne.identity.id = 1
+
+# fake compute service for now to help novaclient tests work
+catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s
+catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s
+catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s
+catalog.RegionOne.compute.name = 'Compute Service'
+catalog.RegionOne.compute.id = 2
diff --git a/keystone-moon/keystone/tests/unit/default_fixtures.py b/keystone-moon/keystone/tests/unit/default_fixtures.py
new file mode 100644 (file)
index 0000000..f7e2064
--- /dev/null
@@ -0,0 +1,121 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(dolph): please try to avoid additional fixtures if possible; test suite
+#              performance may be negatively affected.
+
+DEFAULT_DOMAIN_ID = 'default'
+
+TENANTS = [
+    {
+        'id': 'bar',
+        'name': 'BAR',
+        'domain_id': DEFAULT_DOMAIN_ID,
+        'description': 'description',
+        'enabled': True,
+        'parent_id': None,
+    }, {
+        'id': 'baz',
+        'name': 'BAZ',
+        'domain_id': DEFAULT_DOMAIN_ID,
+        'description': 'description',
+        'enabled': True,
+        'parent_id': None,
+    }, {
+        'id': 'mtu',
+        'name': 'MTU',
+        'description': 'description',
+        'enabled': True,
+        'domain_id': DEFAULT_DOMAIN_ID,
+        'parent_id': None,
+    }, {
+        'id': 'service',
+        'name': 'service',
+        'description': 'description',
+        'enabled': True,
+        'domain_id': DEFAULT_DOMAIN_ID,
+        'parent_id': None,
+    }
+]
+
+# NOTE(ja): a role of keystone_admin is done in setUp
+USERS = [
+    {
+        'id': 'foo',
+        'name': 'FOO',
+        'domain_id': DEFAULT_DOMAIN_ID,
+        'password': 'foo2',
+        'tenants': ['bar'],
+        'enabled': True,
+        'email': 'foo@bar.com',
+    }, {
+        'id': 'two',
+        'name': 'TWO',
+        'domain_id': DEFAULT_DOMAIN_ID,
+        'password': 'two2',
+        'enabled': True,
+        'default_project_id': 'baz',
+        'tenants': ['baz'],
+        'email': 'two@three.com',
+    }, {
+        'id': 'badguy',
+        'name': 'BadGuy',
+        'domain_id': DEFAULT_DOMAIN_ID,
+        'password': 'bad',
+        'enabled': False,
+        'default_project_id': 'baz',
+        'tenants': ['baz'],
+        'email': 'bad@guy.com',
+    }, {
+        'id': 'sna',
+        'name': 'SNA',
+        'domain_id': DEFAULT_DOMAIN_ID,
+        'password': 'snafu',
+        'enabled': True,
+        'tenants': ['bar'],
+        'email': 'sna@snl.coom',
+    }
+]
+
+ROLES = [
+    {
+        'id': 'admin',
+        'name': 'admin',
+    }, {
+        'id': 'member',
+        'name': 'Member',
+    }, {
+        'id': '9fe2ff9ee4384b1894a90878d3e92bab',
+        'name': '_member_',
+    }, {
+        'id': 'other',
+        'name': 'Other',
+    }, {
+        'id': 'browser',
+        'name': 'Browser',
+    }, {
+        'id': 'writer',
+        'name': 'Writer',
+    }, {
+        'id': 'service',
+        'name': 'Service',
+    }
+]
+
+DOMAINS = [{'description':
+            (u'Owns users and tenants (i.e. projects)'
+                ' available on Identity API v2.'),
+            'enabled': True,
+            'id': DEFAULT_DOMAIN_ID,
+            'name': u'Default'}]
diff --git a/keystone-moon/keystone/tests/unit/fakeldap.py b/keystone-moon/keystone/tests/unit/fakeldap.py
new file mode 100644 (file)
index 0000000..85aaadf
--- /dev/null
@@ -0,0 +1,602 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Fake LDAP server for test harness.
+
+This class does very little error checking, and knows nothing about ldap
+class definitions.  It implements the minimum emulation of the python ldap
+library to work with nova.
+
+"""
+
+import re
+import shelve
+
+import ldap
+from oslo_config import cfg
+from oslo_log import log
+import six
+from six import moves
+
+from keystone.common.ldap import core
+from keystone import exception
+
+
+SCOPE_NAMES = {
+    ldap.SCOPE_BASE: 'SCOPE_BASE',
+    ldap.SCOPE_ONELEVEL: 'SCOPE_ONELEVEL',
+    ldap.SCOPE_SUBTREE: 'SCOPE_SUBTREE',
+}
+
+# http://msdn.microsoft.com/en-us/library/windows/desktop/aa366991(v=vs.85).aspx  # noqa
+CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+def _internal_attr(attr_name, value_or_values):
+    def normalize_value(value):
+        return core.utf8_decode(value)
+
+    def normalize_dn(dn):
+        # Capitalize the attribute names as an LDAP server might.
+
+        # NOTE(blk-u): Special case for this tested value, used with
+        # test_user_id_comma. The call to str2dn below isn't always correct,
+        # because `dn` is escaped for an LDAP filter. str2dn() works here
+        # only because there are no special characters in `dn`.
+        if dn == 'cn=Doe\\5c, John,ou=Users,cn=example,cn=com':
+            return 'CN=Doe\\, John,OU=Users,CN=example,CN=com'
+
+        # NOTE(blk-u): Another special case for this tested value. When a
+        # roleOccupant has an escaped comma, it gets converted to \2C.
+        if dn == 'cn=Doe\\, John,ou=Users,cn=example,cn=com':
+            return 'CN=Doe\\2C John,OU=Users,CN=example,CN=com'
+
+        dn = ldap.dn.str2dn(core.utf8_encode(dn))
+        norm = []
+        for part in dn:
+            name, val, i = part[0]
+            name = core.utf8_decode(name)
+            name = name.upper()
+            name = core.utf8_encode(name)
+            norm.append([(name, val, i)])
+        return core.utf8_decode(ldap.dn.dn2str(norm))
+
+    if attr_name in ('member', 'roleOccupant'):
+        attr_fn = normalize_dn
+    else:
+        attr_fn = normalize_value
+
+    if isinstance(value_or_values, list):
+        return [attr_fn(x) for x in value_or_values]
+    return [attr_fn(value_or_values)]
+
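+# Illustrative behavior (added commentary): values come back as a list of
+# unicode strings, and DN-valued attributes ('member', 'roleOccupant') get
+# their attribute names upper-cased the way a real LDAP server might, e.g.
+#     _internal_attr('member', 'cn=foo,ou=Users') == ['CN=foo,OU=Users']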
+
+def _match_query(query, attrs):
+    """Match an ldap query to an attribute dictionary.
+
+    The characters &, |, and ! are supported in the query. No syntax checking
+    is performed, so malformed queries will not work correctly.
+    """
+    # cut off the parentheses
+    inner = query[1:-1]
+    if inner.startswith(('&', '|')):
+        if inner[0] == '&':
+            matchfn = all
+        else:
+            matchfn = any
+        # cut off the & or |
+        groups = _paren_groups(inner[1:])
+        return matchfn(_match_query(group, attrs) for group in groups)
+    if inner.startswith('!'):
+        # cut off the ! and the nested parentheses
+        return not _match_query(query[2:-1], attrs)
+
+    (k, _sep, v) = inner.partition('=')
+    return _match(k, v, attrs)
+
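+# Usage sketch (added commentary, attribute values are hypothetical):
+#     attrs = {'cn': ['foo'], 'sn': ['bar']}
+#     _match_query('(cn=foo)', attrs)              # True
+#     _match_query('(&(cn=foo)(sn=bar))', attrs)   # True
+#     _match_query('(!(cn=baz))', attrs)           # True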
+
+def _paren_groups(source):
+    """Split a string into parenthesized groups."""
+    count = 0
+    start = 0
+    result = []
+    for pos in moves.range(len(source)):
+        if source[pos] == '(':
+            if count == 0:
+                start = pos
+            count += 1
+        if source[pos] == ')':
+            count -= 1
+            if count == 0:
+                result.append(source[start:pos + 1])
+    return result
+
+
+def _match(key, value, attrs):
+    """Match a given key and value against an attribute list."""
+
+    def match_with_wildcards(norm_val, val_list):
+        # Case insensitive checking with wildcards
+        if norm_val.startswith('*'):
+            if norm_val.endswith('*'):
+                # Is the string anywhere in the target?
+                for x in val_list:
+                    if norm_val[1:-1] in x:
+                        return True
+            else:
+                # Is the string at the end of the target?
+                for x in val_list:
+                    if (norm_val[1:] ==
+                            x[len(x) - len(norm_val) + 1:]):
+                        return True
+        elif norm_val.endswith('*'):
+            # Is the string at the start of the target?
+            for x in val_list:
+                if norm_val[:-1] == x[:len(norm_val) - 1]:
+                    return True
+        else:
+            # Is the string an exact match?
+            for x in val_list:
+                if norm_val == x:
+                    return True
+        return False
+
+    if key not in attrs:
+        return False
+    # This is a pure wildcard search, so the answer must be yes!
+    if value == '*':
+        return True
+    if key == 'serviceId':
+        # for serviceId, the backend is returning a list of numbers
+        # make sure we convert them to strings first before comparing
+        # them
+        str_sids = [six.text_type(x) for x in attrs[key]]
+        return six.text_type(value) in str_sids
+    if key != 'objectclass':
+        check_value = _internal_attr(key, value)[0].lower()
+        norm_values = list(
+            _internal_attr(key, x)[0].lower() for x in attrs[key])
+        return match_with_wildcards(check_value, norm_values)
+    # it is an objectclass check, so check subclasses
+    values = _subs(value)
+    for v in values:
+        if v in attrs[key]:
+            return True
+    return False
+
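+# Wildcard semantics sketch (added commentary): matching is case-insensitive;
+# 'foo*' anchors at the start, '*foo' at the end, '*foo*' matches anywhere,
+# and a bare '*' matches any entry that has the attribute at all.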
+
+def _subs(value):
+    """Returns a list of subclass strings.
+
+    The strings represent the ldap objectclass plus any subclasses that
+    inherit from it. Fakeldap doesn't know about the ldap object structure,
+    so subclasses need to be defined manually in the dictionary below.
+
+    """
+    subs = {'groupOfNames': ['keystoneTenant',
+                             'keystoneRole',
+                             'keystoneTenantRole']}
+    if value in subs:
+        return [value] + subs[value]
+    return [value]
+
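+# For example (added commentary), _subs('groupOfNames') returns the class
+# plus its three keystone-specific subclasses, while any other value comes
+# back unchanged as a single-element list.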
+
+server_fail = False
+
+
+class FakeShelve(dict):
+
+    def sync(self):
+        pass
+
+
+FakeShelves = {}
+
+
+class FakeLdap(core.LDAPHandler):
+    '''Emulate the python-ldap API.
+
+    The python-ldap API requires all strings to be UTF-8 encoded. This
+    is assured by the caller of this interface
+    (i.e. KeystoneLDAPHandler).
+
+    However, internally this emulation MUST process and store strings
+    in a canonical form which permits operations on
+    characters. Encoded strings do not provide the ability to operate
+    on characters. Therefore this emulation accepts UTF-8 encoded
+    strings, decodes them to unicode for operations internal to this
+    emulation, and encodes them back to UTF-8 when returning values
+    from the emulation.
+    '''
+
+    __prefix = 'ldap:'
+
+    def __init__(self, conn=None):
+        super(FakeLdap, self).__init__(conn=conn)
+        self._ldap_options = {ldap.OPT_DEREF: ldap.DEREF_NEVER}
+
+    def connect(self, url, page_size=0, alias_dereferencing=None,
+                use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
+                tls_req_cert='demand', chase_referrals=None, debug_level=None,
+                use_pool=None, pool_size=None, pool_retry_max=None,
+                pool_retry_delay=None, pool_conn_timeout=None,
+                pool_conn_lifetime=None):
+        if url.startswith('fake://memory'):
+            if url not in FakeShelves:
+                FakeShelves[url] = FakeShelve()
+            self.db = FakeShelves[url]
+        else:
+            self.db = shelve.open(url[7:])
+
+        using_ldaps = url.lower().startswith("ldaps")
+
+        if use_tls and using_ldaps:
+            raise AssertionError('Invalid TLS / LDAPS combination')
+
+        if use_tls:
+            if tls_cacertfile:
+                ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
+            elif tls_cacertdir:
+                ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
+            if tls_req_cert in core.LDAP_TLS_CERTS.values():
+                ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
+            else:
+                raise ValueError("invalid TLS_REQUIRE_CERT tls_req_cert=%s",
+                                 tls_req_cert)
+
+        if alias_dereferencing is not None:
+            self.set_option(ldap.OPT_DEREF, alias_dereferencing)
+        self.page_size = page_size
+
+        self.use_pool = use_pool
+        self.pool_size = pool_size
+        self.pool_retry_max = pool_retry_max
+        self.pool_retry_delay = pool_retry_delay
+        self.pool_conn_timeout = pool_conn_timeout
+        self.pool_conn_lifetime = pool_conn_lifetime
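+
+    # Connection sketch (added commentary): a 'fake://memory' URL binds this
+    # handler to a process-wide FakeShelve shared by every handler using the
+    # same URL; any other URL is opened as an on-disk shelve at url[7:].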
+
+    def dn(self, dn):
+        return core.utf8_decode(dn)
+
+    def _dn_to_id_attr(self, dn):
+        return core.utf8_decode(ldap.dn.str2dn(core.utf8_encode(dn))[0][0][0])
+
+    def _dn_to_id_value(self, dn):
+        return core.utf8_decode(ldap.dn.str2dn(core.utf8_encode(dn))[0][0][1])
+
+    def key(self, dn):
+        return '%s%s' % (self.__prefix, self.dn(dn))
+
+    def simple_bind_s(self, who='', cred='',
+                      serverctrls=None, clientctrls=None):
+        """This method is ignored, but provided for compatibility."""
+        if server_fail:
+            raise ldap.SERVER_DOWN
+        whos = ['cn=Admin', CONF.ldap.user]
+        if who in whos and cred in ['password', CONF.ldap.password]:
+            return
+
+        try:
+            attrs = self.db[self.key(who)]
+        except KeyError:
+            LOG.debug('bind fail: who=%s not found', core.utf8_decode(who))
+            raise ldap.NO_SUCH_OBJECT
+
+        db_password = None
+        try:
+            db_password = attrs['userPassword'][0]
+        except (KeyError, IndexError):
+            LOG.debug('bind fail: password for who=%s not found',
+                      core.utf8_decode(who))
+            raise ldap.INAPPROPRIATE_AUTH
+
+        if cred != db_password:
+            LOG.debug('bind fail: password for who=%s does not match',
+                      core.utf8_decode(who))
+            raise ldap.INVALID_CREDENTIALS
+
+    def unbind_s(self):
+        """This method is ignored, but provided for compatibility."""
+        if server_fail:
+            raise ldap.SERVER_DOWN
+
+    def add_s(self, dn, modlist):
+        """Add an object with the specified attributes at dn."""
+        if server_fail:
+            raise ldap.SERVER_DOWN
+
+        id_attr_in_modlist = False
+        id_attr = self._dn_to_id_attr(dn)
+        id_value = self._dn_to_id_value(dn)
+
+        # The LDAP API raises a TypeError if attr name is None.
+        for k, dummy_v in modlist:
+            if k is None:
+                raise TypeError('must be string, not None. modlist=%s' %
+                                modlist)
+
+            if k == id_attr:
+                for val in dummy_v:
+                    if core.utf8_decode(val) == id_value:
+                        id_attr_in_modlist = True
+
+        if not id_attr_in_modlist:
+            LOG.debug('id_attribute=%(attr)s missing, attributes=%(attrs)s',
+                      {'attr': id_attr, 'attrs': modlist})
+            raise ldap.NAMING_VIOLATION
+        key = self.key(dn)
+        LOG.debug('add item: dn=%(dn)s, attrs=%(attrs)s', {
+            'dn': core.utf8_decode(dn), 'attrs': modlist})
+        if key in self.db:
+            LOG.debug('add item failed: dn=%s is already in store.',
+                      core.utf8_decode(dn))
+            raise ldap.ALREADY_EXISTS(dn)
+
+        self.db[key] = {k: _internal_attr(k, v) for k, v in modlist}
+        self.db.sync()
+
+    def delete_s(self, dn):
+        """Remove the ldap object at specified dn."""
+        return self.delete_ext_s(dn, serverctrls=[])
+
+    def _getChildren(self, dn):
+        return [k for k, v in six.iteritems(self.db)
+                if re.match('%s.*,%s' % (
+                            re.escape(self.__prefix),
+                            re.escape(self.dn(dn))), k)]
+
+    def delete_ext_s(self, dn, serverctrls, clientctrls=None):
+        """Remove the ldap object at specified dn."""
+        if server_fail:
+            raise ldap.SERVER_DOWN
+
+        try:
+            if CONTROL_TREEDELETE in [c.controlType for c in serverctrls]:
+                LOG.debug('FakeLdap subtree_delete item: dn=%s',
+                          core.utf8_decode(dn))
+                children = self._getChildren(dn)
+                for c in children:
+                    del self.db[c]
+
+            key = self.key(dn)
+            LOG.debug('FakeLdap delete item: dn=%s', core.utf8_decode(dn))
+            del self.db[key]
+        except KeyError:
+            LOG.debug('delete item failed: dn=%s not found.',
+                      core.utf8_decode(dn))
+            raise ldap.NO_SUCH_OBJECT
+        self.db.sync()
+
+    def modify_s(self, dn, modlist):
+        """Modify the object at dn using the attribute list.
+
+        :param dn: an LDAP DN
+        :param modlist: a list of tuples in the following form:
+                      ([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)
+        """
+        if server_fail:
+            raise ldap.SERVER_DOWN
+
+        key = self.key(dn)
+        LOG.debug('modify item: dn=%(dn)s attrs=%(attrs)s', {
+            'dn': core.utf8_decode(dn), 'attrs': modlist})
+        try:
+            entry = self.db[key]
+        except KeyError:
+            LOG.debug('modify item failed: dn=%s not found.',
+                      core.utf8_decode(dn))
+            raise ldap.NO_SUCH_OBJECT
+
+        for cmd, k, v in modlist:
+            values = entry.setdefault(k, [])
+            if cmd == ldap.MOD_ADD:
+                v = _internal_attr(k, v)
+                for x in v:
+                    if x in values:
+                        raise ldap.TYPE_OR_VALUE_EXISTS
+                values += v
+            elif cmd == ldap.MOD_REPLACE:
+                values[:] = _internal_attr(k, v)
+            elif cmd == ldap.MOD_DELETE:
+                if v is None:
+                    if not values:
+                        LOG.debug('modify item failed: '
+                                  'item has no attribute "%s" to delete', k)
+                        raise ldap.NO_SUCH_ATTRIBUTE
+                    values[:] = []
+                else:
+                    for val in _internal_attr(k, v):
+                        try:
+                            values.remove(val)
+                        except ValueError:
+                            LOG.debug('modify item failed: '
+                                      'item has no attribute "%(k)s" with '
+                                      'value "%(v)s" to delete', {
+                                          'k': k, 'v': val})
+                            raise ldap.NO_SUCH_ATTRIBUTE
+            else:
+                LOG.debug('modify item failed: unknown command %s', cmd)
+                raise NotImplementedError('modify_s action %s not'
+                                          ' implemented' % cmd)
+        self.db[key] = entry
+        self.db.sync()
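+
+    # Usage sketch (added commentary, DN and values are hypothetical):
+    #     conn.modify_s('cn=foo,ou=Users,cn=example,cn=com',
+    #                   [(ldap.MOD_REPLACE, 'userPassword', ['secret'])])
+    # replaces the password attribute in place and persists via db.sync().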
+
+    def search_s(self, base, scope,
+                 filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
+        """Search for all matching objects under base using the query.
+
+        Args:
+        base -- dn to search under
+        scope -- search scope (base, subtree, onelevel)
+        filterstr -- LDAP filter string to match objects against
+        attrlist -- attrs to return. Returns all attrs if not specified
+
+        """
+        if server_fail:
+            raise ldap.SERVER_DOWN
+
+        if scope == ldap.SCOPE_BASE:
+            try:
+                item_dict = self.db[self.key(base)]
+            except KeyError:
+                LOG.debug('search fail: dn not found for SCOPE_BASE')
+                raise ldap.NO_SUCH_OBJECT
+            results = [(base, item_dict)]
+        elif scope == ldap.SCOPE_SUBTREE:
+            # NOTE: an LDAP search with SUBTREE scope must return the base
+            # entry itself as well, so results is seeded with the base entry
+            # below before the children are collected; the base entry was
+            # previously omitted, see
+            # https://bugs.launchpad.net/keystone/+bug/1368772
+            try:
+                item_dict = self.db[self.key(base)]
+            except KeyError:
+                LOG.debug('search fail: dn not found for SCOPE_SUBTREE')
+                raise ldap.NO_SUCH_OBJECT
+            results = [(base, item_dict)]
+            extraresults = [(k[len(self.__prefix):], v)
+                            for k, v in six.iteritems(self.db)
+                            if re.match('%s.*,%s' %
+                                        (re.escape(self.__prefix),
+                                         re.escape(self.dn(base))), k)]
+            results.extend(extraresults)
+        elif scope == ldap.SCOPE_ONELEVEL:
+
+            def get_entries():
+                base_dn = ldap.dn.str2dn(core.utf8_encode(base))
+                base_len = len(base_dn)
+
+                for k, v in six.iteritems(self.db):
+                    if not k.startswith(self.__prefix):
+                        continue
+                    k_dn_str = k[len(self.__prefix):]
+                    k_dn = ldap.dn.str2dn(core.utf8_encode(k_dn_str))
+                    if len(k_dn) != base_len + 1:
+                        continue
+                    if k_dn[-base_len:] != base_dn:
+                        continue
+                    yield (k_dn_str, v)
+
+            results = list(get_entries())
+
+        else:
+            # openldap client/server raises PROTOCOL_ERROR for unexpected scope
+            raise ldap.PROTOCOL_ERROR
+
+        objects = []
+        for dn, attrs in results:
+            # filter the objects by filterstr
+            id_attr, id_val, _ = ldap.dn.str2dn(core.utf8_encode(dn))[0][0]
+            id_attr = core.utf8_decode(id_attr)
+            id_val = core.utf8_decode(id_val)
+            match_attrs = attrs.copy()
+            match_attrs[id_attr] = [id_val]
+            if not filterstr or _match_query(filterstr, match_attrs):
+                # filter the attributes by attrlist
+                attrs = {k: v for k, v in six.iteritems(attrs)
+                         if not attrlist or k in attrlist}
+                objects.append((dn, attrs))
+
+        return objects
+
+    def set_option(self, option, invalue):
+        self._ldap_options[option] = invalue
+
+    def get_option(self, option):
+        value = self._ldap_options.get(option, None)
+        return value
+
+    def search_ext(self, base, scope,
+                   filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
+                   serverctrls=None, clientctrls=None,
+                   timeout=-1, sizelimit=0):
+        raise exception.NotImplemented()
+
+    def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
+                resp_ctrl_classes=None):
+        raise exception.NotImplemented()
+
+
+class FakeLdapPool(FakeLdap):
+    '''Emulate the python-ldap API with pooled connections, reusing the
+    existing FakeLdap logic.
+
+    This class is used as the connector class in PooledLDAPHandler.
+    '''
+
+    def __init__(self, uri, retry_max=None, retry_delay=None, conn=None):
+        super(FakeLdapPool, self).__init__(conn=conn)
+        self.url = uri
+        self.connected = None
+        self.conn = self
+        self._connection_time = 5  # any number greater than 0
+
+    def get_lifetime(self):
+        return self._connection_time
+
+    def simple_bind_s(self, who=None, cred=None,
+                      serverctrls=None, clientctrls=None):
+        if self.url.startswith('fakepool://memory'):
+            if self.url not in FakeShelves:
+                FakeShelves[self.url] = FakeShelve()
+            self.db = FakeShelves[self.url]
+        else:
+            self.db = shelve.open(self.url[11:])
+
+        if not who:
+            who = 'cn=Admin'
+        if not cred:
+            cred = 'password'
+
+        super(FakeLdapPool, self).simple_bind_s(who=who, cred=cred,
+                                                serverctrls=serverctrls,
+                                                clientctrls=clientctrls)
+
+    def unbind_ext_s(self):
+        '''No-op unbind, needed so FakeLdap can act as a pool connector.'''
+        pass
+
+
+class FakeLdapNoSubtreeDelete(FakeLdap):
+    """FakeLdap subclass that does not support subtree delete
+
+    Same as FakeLdap except delete will throw the LDAP error
+    ldap.NOT_ALLOWED_ON_NONLEAF if there is an attempt to delete
+    an entry that has children.
+    """
+
+    def delete_ext_s(self, dn, serverctrls, clientctrls=None):
+        """Remove the ldap object at specified dn."""
+        if server_fail:
+            raise ldap.SERVER_DOWN
+
+        try:
+            children = self._getChildren(dn)
+            if children:
+                raise ldap.NOT_ALLOWED_ON_NONLEAF
+
+        except KeyError:
+            LOG.debug('delete item failed: dn=%s not found.',
+                      core.utf8_decode(dn))
+            raise ldap.NO_SUCH_OBJECT
+        super(FakeLdapNoSubtreeDelete, self).delete_ext_s(dn,
+                                                          serverctrls,
+                                                          clientctrls)
diff --git a/keystone-moon/keystone/tests/unit/federation_fixtures.py b/keystone-moon/keystone/tests/unit/federation_fixtures.py
new file mode 100644 (file)
index 0000000..d4527d9
--- /dev/null
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+IDP_ENTITY_ID = 'https://localhost/v3/OS-FEDERATION/saml2/idp'
+IDP_SSO_ENDPOINT = 'https://localhost/v3/OS-FEDERATION/saml2/SSO'
+
+# Organization info
+IDP_ORGANIZATION_NAME = 'ACME INC'
+IDP_ORGANIZATION_DISPLAY_NAME = 'ACME'
+IDP_ORGANIZATION_URL = 'https://acme.example.com'
+
+# Contact info
+IDP_CONTACT_COMPANY = 'ACME Sub'
+IDP_CONTACT_GIVEN_NAME = 'Joe'
+IDP_CONTACT_SURNAME = 'Hacker'
+IDP_CONTACT_EMAIL = 'joe@acme.example.com'
+IDP_CONTACT_TELEPHONE_NUMBER = '1234567890'
+IDP_CONTACT_TYPE = 'technical'
diff --git a/keystone-moon/keystone/tests/unit/filtering.py b/keystone-moon/keystone/tests/unit/filtering.py
new file mode 100644 (file)
index 0000000..1a31a23
--- /dev/null
@@ -0,0 +1,96 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+
+
+CONF = cfg.CONF
+
+
+class FilterTests(object):
+
+    # Provide support for checking if a batch of list items all
+    # exist within a contiguous range in a total list
+    def _match_with_list(self, this_batch, total_list,
+                         batch_size=None,
+                         list_start=None, list_end=None):
+        if batch_size is None:
+            batch_size = len(this_batch)
+        if list_start is None:
+            list_start = 0
+        if list_end is None:
+            list_end = len(total_list)
+        for batch_item in range(0, batch_size):
+            found = False
+            for list_item in range(list_start, list_end):
+                if this_batch[batch_item]['id'] == total_list[list_item]['id']:
+                    found = True
+            self.assertTrue(found)
+
+    def _create_entity(self, entity_type):
+        f = getattr(self.identity_api, 'create_%s' % entity_type, None)
+        if f is None:
+            f = getattr(self.assignment_api, 'create_%s' % entity_type)
+        return f
+
+    def _delete_entity(self, entity_type):
+        f = getattr(self.identity_api, 'delete_%s' % entity_type, None)
+        if f is None:
+            f = getattr(self.assignment_api, 'delete_%s' % entity_type)
+        return f
+
+    def _list_entities(self, entity_type):
+        f = getattr(self.identity_api, 'list_%ss' % entity_type, None)
+        if f is None:
+            f = getattr(self.assignment_api, 'list_%ss' % entity_type)
+        return f
+
+    def _create_one_entity(self, entity_type, domain_id, name):
+        new_entity = {'name': name,
+                      'domain_id': domain_id}
+        if entity_type in ['user', 'group']:
+            # The manager layer creates the ID for users and groups
+            new_entity = self._create_entity(entity_type)(new_entity)
+        else:
+            new_entity['id'] = '0000' + uuid.uuid4().hex
+            self._create_entity(entity_type)(new_entity['id'], new_entity)
+        return new_entity
+
+    def _create_test_data(self, entity_type, number, domain_id=None,
+                          name_dict=None):
+        """Create entity test data
+
+        :param entity_type: type of entity to create, e.g. 'user', group' etc.
+        :param number: number of entities to create,
+        :param domain_id: if not defined, all users will be created in the
+                          default domain.
+        :param name_dict: optional dict containing entity number and name pairs
+
+        """
+        entity_list = []
+        if domain_id is None:
+            domain_id = CONF.identity.default_domain_id
+        name_dict = name_dict or {}
+        for x in range(number):
+            # If this index has a name defined in the name_dict, then use it
+            name = name_dict.get(x, uuid.uuid4().hex)
+            new_entity = self._create_one_entity(entity_type, domain_id, name)
+            entity_list.append(new_entity)
+        return entity_list
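+
+    # Usage sketch (added commentary, names are hypothetical):
+    #     users = self._create_test_data('user', 3, name_dict={0: 'alice'})
+    # creates three users in the default domain; entity 0 is named 'alice'
+    # and the rest get uuid-generated names.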
+
+    def _delete_test_data(self, entity_type, entity_list):
+        for entity in entity_list:
+            self._delete_entity(entity_type)(entity['id'])
diff --git a/keystone-moon/keystone/tests/unit/identity/__init__.py b/keystone-moon/keystone/tests/unit/identity/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/unit/identity/test_core.py b/keystone-moon/keystone/tests/unit/identity/test_core.py
new file mode 100644 (file)
index 0000000..6c8faeb
--- /dev/null
@@ -0,0 +1,125 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for core identity behavior."""
+
+import os
+import uuid
+
+import mock
+from oslo_config import cfg
+
+from keystone import exception
+from keystone import identity
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import database
+
+
+CONF = cfg.CONF
+
+
+class TestDomainConfigs(tests.BaseTestCase):
+
+    def setUp(self):
+        super(TestDomainConfigs, self).setUp()
+        self.addCleanup(CONF.reset)
+
+        self.tmp_dir = tests.dirs.tmp()
+        CONF.set_override('domain_config_dir', self.tmp_dir, 'identity')
+
+    def test_config_for_nonexistent_domain(self):
+        """Having a config for a non-existent domain will be ignored.
+
+        There are no assertions in this test because there are no side
+        effects. If there is a config file for a domain that does not
+        exist it should be ignored.
+
+        """
+        domain_id = uuid.uuid4().hex
+        domain_config_filename = os.path.join(self.tmp_dir,
+                                              'keystone.%s.conf' % domain_id)
+        self.addCleanup(lambda: os.remove(domain_config_filename))
+        with open(domain_config_filename, 'w'):
+            """Write an empty config file."""
+
+        e = exception.DomainNotFound(domain_id=domain_id)
+        mock_assignment_api = mock.Mock()
+        mock_assignment_api.get_domain_by_name.side_effect = e
+
+        domain_config = identity.DomainConfigs()
+        fake_standard_driver = None
+        domain_config.setup_domain_drivers(fake_standard_driver,
+                                           mock_assignment_api)
+
+    def test_config_for_dot_name_domain(self):
+        # Ensure we can get the right domain name which has dots within it
+        # from filename.
+        domain_config_filename = os.path.join(self.tmp_dir,
+                                              'keystone.abc.def.com.conf')
+        with open(domain_config_filename, 'w'):
+            """Write an empty config file."""
+        self.addCleanup(os.remove, domain_config_filename)
+
+        with mock.patch.object(identity.DomainConfigs,
+                               '_load_config_from_file') as mock_load_config:
+            domain_config = identity.DomainConfigs()
+            fake_assignment_api = None
+            fake_standard_driver = None
+            domain_config.setup_domain_drivers(fake_standard_driver,
+                                               fake_assignment_api)
+            mock_load_config.assert_called_once_with(fake_assignment_api,
+                                                     [domain_config_filename],
+                                                     'abc.def.com')
+
+
+class TestDatabaseDomainConfigs(tests.TestCase):
+
+    def setUp(self):
+        super(TestDatabaseDomainConfigs, self).setUp()
+        self.useFixture(database.Database())
+        self.load_backends()
+
+    def test_domain_config_in_database_disabled_by_default(self):
+        self.assertFalse(CONF.identity.domain_configurations_from_database)
+
+    def test_loading_config_from_database(self):
+        CONF.set_override('domain_configurations_from_database', True,
+                          'identity')
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        # Override two config options for our domain
+        conf = {'ldap': {'url': uuid.uuid4().hex,
+                         'suffix': uuid.uuid4().hex},
+                'identity': {
+                    'driver': 'keystone.identity.backends.ldap.Identity'}}
+        self.domain_config_api.create_config(domain['id'], conf)
+        fake_standard_driver = None
+        domain_config = identity.DomainConfigs()
+        domain_config.setup_domain_drivers(fake_standard_driver,
+                                           self.resource_api)
+        # Make sure our two overrides are in place, and others are not affected
+        res = domain_config.get_domain_conf(domain['id'])
+        self.assertEqual(conf['ldap']['url'], res.ldap.url)
+        self.assertEqual(conf['ldap']['suffix'], res.ldap.suffix)
+        self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope)
+
+        # Now turn off using database domain configuration and check that the
+        # default config file values are now seen instead of the overrides.
+        CONF.set_override('domain_configurations_from_database', False,
+                          'identity')
+        domain_config = identity.DomainConfigs()
+        domain_config.setup_domain_drivers(fake_standard_driver,
+                                           self.resource_api)
+        res = domain_config.get_domain_conf(domain['id'])
+        self.assertEqual(CONF.ldap.url, res.ldap.url)
+        self.assertEqual(CONF.ldap.suffix, res.ldap.suffix)
+        self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope)
diff --git a/keystone-moon/keystone/tests/unit/identity_mapping.py b/keystone-moon/keystone/tests/unit/identity_mapping.py
new file mode 100644 (file)
index 0000000..7fb8063
--- /dev/null
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from keystone.common import sql
+from keystone.identity.mapping_backends import sql as mapping_sql
+
+
+def list_id_mappings():
+    """List all id_mappings for testing purposes."""
+
+    a_session = sql.get_session()
+    refs = a_session.query(mapping_sql.IDMapping).all()
+    return [x.to_dict() for x in refs]
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/__init__.py b/keystone-moon/keystone/tests/unit/ksfixtures/__init__.py
new file mode 100644 (file)
index 0000000..81b8029
--- /dev/null
@@ -0,0 +1,15 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from keystone.tests.unit.ksfixtures.cache import Cache  # noqa
+from keystone.tests.unit.ksfixtures.key_repository import KeyRepository  # noqa
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/appserver.py b/keystone-moon/keystone/tests/unit/ksfixtures/appserver.py
new file mode 100644 (file)
index 0000000..ea1e625
--- /dev/null
@@ -0,0 +1,79 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import fixtures
+from oslo_config import cfg
+from paste import deploy
+
+from keystone.common import environment
+
+
+CONF = cfg.CONF
+
+MAIN = 'main'
+ADMIN = 'admin'
+
+
+class AppServer(fixtures.Fixture):
+    """A fixture for managing an application server instance.
+    """
+
+    def __init__(self, config, name, cert=None, key=None, ca=None,
+                 cert_required=False, host='127.0.0.1', port=0):
+        super(AppServer, self).__init__()
+        self.config = config
+        self.name = name
+        self.cert = cert
+        self.key = key
+        self.ca = ca
+        self.cert_required = cert_required
+        self.host = host
+        self.port = port
+
+    def setUp(self):
+        super(AppServer, self).setUp()
+
+        app = deploy.loadapp(self.config, name=self.name)
+        self.server = environment.Server(app, self.host, self.port)
+        self._setup_SSL_if_requested()
+        self.server.start(key='socket')
+
+        # some tests need to know the port we ran on.
+        self.port = self.server.socket_info['socket'][1]
+        self._update_config_opt()
+
+        self.addCleanup(self.server.stop)
+
+    def _setup_SSL_if_requested(self):
+        # TODO(dstanek): fix environment.Server to take a SSLOpts instance
+        # so that the params are either always set or not
+        if (self.cert is not None and
+                self.ca is not None and
+                self.key is not None):
+            self.server.set_ssl(certfile=self.cert,
+                                keyfile=self.key,
+                                ca_certs=self.ca,
+                                cert_required=self.cert_required)
+
+    def _update_config_opt(self):
+        """Updates the config with the actual port used."""
+        opt_name = self._get_config_option_for_section_name()
+        CONF.set_override(opt_name, self.port, group='eventlet_server')
+
+    def _get_config_option_for_section_name(self):
+        """Maps Paster config section names to port option names."""
+        return {'admin': 'admin_port', 'main': 'public_port'}[self.name]
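+
+
+# Usage sketch (added commentary, paste_config is hypothetical):
+#     server = self.useFixture(AppServer(paste_config, ADMIN))
+# boots the admin application on an ephemeral port; after setUp the bound
+# port is available as server.port and mirrored into the matching
+# eventlet_server config option.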
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/cache.py b/keystone-moon/keystone/tests/unit/ksfixtures/cache.py
new file mode 100644 (file)
index 0000000..74566f1
--- /dev/null
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+
+from keystone.common import cache
+
+
+class Cache(fixtures.Fixture):
+    """A fixture for setting up and tearing down the cache between test cases.
+    """
+
+    def setUp(self):
+        super(Cache, self).setUp()
+
+        # NOTE(dstanek):  We must remove the existing cache backend in the
+        # setUp instead of the tearDown because it defaults to a no-op cache
+        # and we want the configure call below to create the correct backend.
+
+        # NOTE(morganfainberg):  The only way to reconfigure the CacheRegion
+        # object on each setUp() call is to remove the .backend property.
+        if cache.REGION.is_configured:
+            del cache.REGION.backend
+
+        # ensure the cache region instance is setup
+        cache.configure_cache_region(cache.REGION)
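+
+
+# Usage sketch (added commentary): a test case opts in with
+#     self.useFixture(ksfixtures.Cache())
+# so each run reconfigures cache.REGION from scratch rather than inheriting
+# a backend left over from a previous test.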
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/database.py b/keystone-moon/keystone/tests/unit/ksfixtures/database.py
new file mode 100644 (file)
index 0000000..1559753
--- /dev/null
@@ -0,0 +1,124 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import os
+import shutil
+
+import fixtures
+from oslo_config import cfg
+from oslo_db import options as db_options
+from oslo_db.sqlalchemy import migration
+
+from keystone.common import sql
+from keystone.common.sql import migration_helpers
+from keystone.tests import unit as tests
+
+
+CONF = cfg.CONF
+
+
+def run_once(f):
+    """A decorator to ensure the decorated function is only executed once.
+
+    The decorated function must not take any arguments.
+    """
+    @functools.wraps(f)
+    def wrapper():
+        if not wrapper.already_ran:
+            f()
+            wrapper.already_ran = True
+    wrapper.already_ran = False
+    return wrapper
+
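+# For example (added commentary), _load_sqlalchemy_models below is decorated
+# with @run_once: its module walk runs on the first Database() construction
+# only, and every later call returns immediately.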
+
+def _setup_database(extensions=None):
+    if CONF.database.connection != tests.IN_MEM_DB_CONN_STRING:
+        db = tests.dirs.tmp('test.db')
+        pristine = tests.dirs.tmp('test.db.pristine')
+
+        if os.path.exists(db):
+            os.unlink(db)
+        if not os.path.exists(pristine):
+            migration.db_sync(sql.get_engine(),
+                              migration_helpers.find_migrate_repo())
+            for extension in (extensions or []):
+                migration_helpers.sync_database_to_version(extension=extension)
+            shutil.copyfile(db, pristine)
+        else:
+            shutil.copyfile(pristine, db)
+
+
+# NOTE(I159): All options are cleared on every execution, so this method must
+# be called at every fixture initialization.
+def initialize_sql_session():
+    # Make sure the DB is located in the correct location; only set the
+    # default value here, since some test cases need to override it.
+    db_options.set_defaults(
+        CONF,
+        connection=tests.IN_MEM_DB_CONN_STRING)
+
+
+@run_once
+def _load_sqlalchemy_models():
+    """Find all modules containing SQLAlchemy models and import them.
+
+    This creates more consistent, deterministic test runs because tables
+    for all core and extension models are always created in the test
+    database. We ensure this by importing all modules that contain model
+    definitions.
+
+    The database schema during test runs is created using reflection.
+    Reflection is simply SQLAlchemy taking the model definitions for
+    all models currently imported and making tables for each of them.
+    The database schema created during test runs may vary between tests
+    as more models are imported. Importing all models at the start of
+    the test run avoids this problem.
+
+    """
+    keystone_root = os.path.normpath(os.path.join(
+        os.path.dirname(__file__), '..', '..', '..'))
+    for root, dirs, files in os.walk(keystone_root):
+        # NOTE(morganfainberg): Slice the keystone_root off the root to ensure
+        # we do not end up with a module name like:
+        # Users.home.openstack.keystone.assignment.backends.sql
+        root = root[len(keystone_root):]
+        if root.endswith('backends') and 'sql.py' in files:
+            # The root will be prefixed with an instance of os.sep, which will
+            # make the root after replacement '.<root>', the 'keystone' part
+            # of the module path is always added to the front
+            module_name = ('keystone.%s.sql' %
+                           root.replace(os.sep, '.').lstrip('.'))
+            __import__(module_name)
+
+
+class Database(fixtures.Fixture):
+    """A fixture for setting up and tearing down a database.
+
+    """
+
+    def __init__(self, extensions=None):
+        super(Database, self).__init__()
+        self._extensions = extensions
+        initialize_sql_session()
+        _load_sqlalchemy_models()
+
+    def setUp(self):
+        super(Database, self).setUp()
+        _setup_database(extensions=self._extensions)
+
+        self.engine = sql.get_engine()
+        sql.ModelBase.metadata.create_all(bind=self.engine)
+        self.addCleanup(sql.cleanup)
+        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
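+
+
+# Usage sketch (added commentary): self.useFixture(database.Database()) gives
+# a test a freshly created schema (in-memory by default, or restored from a
+# pristine on-disk copy) and registers table drop and session cleanup
+# automatically.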
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py b/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py
new file mode 100644 (file)
index 0000000..47ef6b4
--- /dev/null
@@ -0,0 +1,489 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(morganfainberg) This file shouldn't have flake8 run on it as it has
+# code examples that will fail normal CI pep8/flake8 tests. This is expected.
+# The code has been moved here to ensure that proper tests occur on the
+# test_hacking_checks test cases.
+# flake8: noqa
+
+import fixtures
+
+
+class HackingCode(fixtures.Fixture):
+    """A fixture to house the various code examples for the keystone hacking
+    style checks.
+    """
+
+    mutable_default_args = {
+        'code': """
+                def f():
+                    pass
+
+                def f(a, b='', c=None):
+                    pass
+
+                def f(bad=[]):
+                    pass
+
+                def f(foo, bad=[], more_bad=[x for x in range(3)]):
+                    pass
+
+                def f(foo, bad={}):
+                    pass
+
+                def f(foo, bad={}, another_bad=[], fine=None):
+                    pass
+
+                def f(bad=[]): # noqa
+                    pass
+
+                def funcs(bad=dict(), more_bad=list(), even_more_bad=set()):
+                    "creating mutables through builtins"
+
+                def funcs(bad=something(), more_bad=some_object.something()):
+                    "defaults from any functions"
+
+                def f(bad=set(), more_bad={x for x in range(3)},
+                       even_more_bad={1, 2, 3}):
+                    "set and set comprehession"
+
+                def f(bad={x: x for x in range(3)}):
+                    "dict comprehension"
+            """,
+        'expected_errors': [
+            (7, 10, 'K001'),
+            (10, 15, 'K001'),
+            (10, 29, 'K001'),
+            (13, 15, 'K001'),
+            (16, 15, 'K001'),
+            (16, 31, 'K001'),
+            (22, 14, 'K001'),
+            (22, 31, 'K001'),
+            (22, 53, 'K001'),
+            (25, 14, 'K001'),
+            (25, 36, 'K001'),
+            (28, 10, 'K001'),
+            (28, 27, 'K001'),
+            (29, 21, 'K001'),
+            (32, 11, 'K001'),
+        ]}
+
+    comments_begin_with_space = {
+        'code': """
+            # This is a good comment
+
+            #This is a bad one
+
+            # This is alright and can
+            #    be continued with extra indentation
+            #    if that's what the developer wants.
+        """,
+        'expected_errors': [
+            (3, 0, 'K002'),
+        ]}
+
+    asserting_none_equality = {
+        'code': """
+            class Test(object):
+
+                def test(self):
+                    self.assertEqual('', '')
+                    self.assertEqual('', None)
+                    self.assertEqual(None, '')
+                    self.assertNotEqual('', None)
+                    self.assertNotEqual(None, '')
+                    self.assertNotEqual('', None) # noqa
+                    self.assertNotEqual(None, '') # noqa
+        """,
+        'expected_errors': [
+            (5, 8, 'K003'),
+            (6, 8, 'K003'),
+            (7, 8, 'K004'),
+            (8, 8, 'K004'),
+        ]}
+
+    assert_no_translations_for_debug_logging = {
+        'code': """
+            import logging
+            import logging as stlib_logging
+            from keystone.i18n import _
+            from keystone.i18n import _ as oslo_i18n
+            from keystone.openstack.common import log
+            from keystone.openstack.common import log as oslo_logging
+
+            # stdlib logging
+            L0 = logging.getLogger()
+            L0.debug(_('text'))
+            class C:
+                def __init__(self):
+                    L0.debug(oslo_i18n('text', {}))
+
+            # stdlib logging w/ alias and specifying a logger
+            class C:
+                def __init__(self):
+                    self.L1 = logging.getLogger(__name__)
+                def m(self):
+                    self.L1.debug(
+                        _('text'), {}
+                    )
+
+            # oslo logging and specifying a logger
+            L2 = log.getLogger(__name__)
+            L2.debug(oslo_i18n('text'))
+
+            # oslo logging w/ alias
+            class C:
+                def __init__(self):
+                    self.L3 = oslo_logging.getLogger()
+                    self.L3.debug(_('text'))
+
+            # translation on a separate line
+            msg = _('text')
+            L2.debug(msg)
+
+            # this should not fail
+            if True:
+                msg = _('message %s') % X
+                L2.error(msg)
+                raise TypeError(msg)
+            if True:
+                msg = 'message'
+                L2.debug(msg)
+
+            # this should not fail
+            if True:
+                if True:
+                    msg = _('message')
+                else:
+                    msg = _('message')
+                L2.debug(msg)
+                raise Exception(msg)
+        """,
+        'expected_errors': [
+            (10, 9, 'K005'),
+            (13, 17, 'K005'),
+            (21, 12, 'K005'),
+            (26, 9, 'K005'),
+            (32, 22, 'K005'),
+            (36, 9, 'K005'),
+        ]
+    }
+
+    oslo_namespace_imports = {
+        'code': """
+            import oslo.utils
+            import oslo_utils
+            import oslo.utils.encodeutils
+            import oslo_utils.encodeutils
+            from oslo import utils
+            from oslo.utils import encodeutils
+            from oslo_utils import encodeutils
+
+            import oslo.serialization
+            import oslo_serialization
+            import oslo.serialization.jsonutils
+            import oslo_serialization.jsonutils
+            from oslo import serialization
+            from oslo.serialization import jsonutils
+            from oslo_serialization import jsonutils
+
+            import oslo.messaging
+            import oslo_messaging
+            import oslo.messaging.conffixture
+            import oslo_messaging.conffixture
+            from oslo import messaging
+            from oslo.messaging import conffixture
+            from oslo_messaging import conffixture
+
+            import oslo.db
+            import oslo_db
+            import oslo.db.api
+            import oslo_db.api
+            from oslo import db
+            from oslo.db import api
+            from oslo_db import api
+
+            import oslo.config
+            import oslo_config
+            import oslo.config.cfg
+            import oslo_config.cfg
+            from oslo import config
+            from oslo.config import cfg
+            from oslo_config import cfg
+
+            import oslo.i18n
+            import oslo_i18n
+            import oslo.i18n.log
+            import oslo_i18n.log
+            from oslo import i18n
+            from oslo.i18n import log
+            from oslo_i18n import log
+        """,
+        'expected_errors': [
+            (1, 0, 'K333'),
+            (3, 0, 'K333'),
+            (5, 0, 'K333'),
+            (6, 0, 'K333'),
+            (9, 0, 'K333'),
+            (11, 0, 'K333'),
+            (13, 0, 'K333'),
+            (14, 0, 'K333'),
+            (17, 0, 'K333'),
+            (19, 0, 'K333'),
+            (21, 0, 'K333'),
+            (22, 0, 'K333'),
+            (25, 0, 'K333'),
+            (27, 0, 'K333'),
+            (29, 0, 'K333'),
+            (30, 0, 'K333'),
+            (33, 0, 'K333'),
+            (35, 0, 'K333'),
+            (37, 0, 'K333'),
+            (38, 0, 'K333'),
+            (41, 0, 'K333'),
+            (43, 0, 'K333'),
+            (45, 0, 'K333'),
+            (46, 0, 'K333'),
+        ],
+    }
+
+    dict_constructor = {
+        'code': """
+            lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
+            fool = dict(a='a', b='b')
+            lower_res = dict((k.lower(), v) for k, v in six.iteritems(res[1]))
+            attrs = dict([(k, _from_json(v))])
+            dict([[i,i] for i in range(3)])
+            dict(({1:2}))
+        """,
+        'expected_errors': [
+            (3, 0, 'K008'),
+            (4, 0, 'K008'),
+            (5, 0, 'K008'),
+        ]}
+
+
+class HackingLogging(fixtures.Fixture):
+
+    shared_imports = """
+                import logging
+                import logging as stlib_logging
+                from keystone.i18n import _
+                from keystone.i18n import _ as oslo_i18n
+                from keystone.i18n import _LC
+                from keystone.i18n import _LE
+                from keystone.i18n import _LE as error_hint
+                from keystone.i18n import _LI
+                from keystone.i18n import _LW
+                from keystone.openstack.common import log
+                from keystone.openstack.common import log as oslo_logging
+    """
+
+    examples = [
+        {
+            'code': """
+                # stdlib logging
+                LOG = logging.getLogger()
+                LOG.info(_('text'))
+                class C:
+                    def __init__(self):
+                        LOG.warn(oslo_i18n('text', {}))
+                        LOG.warn(_LW('text', {}))
+            """,
+            'expected_errors': [
+                (3, 9, 'K006'),
+                (6, 17, 'K006'),
+            ],
+        },
+        {
+            'code': """
+                # stdlib logging w/ alias and specifying a logger
+                class C:
+                    def __init__(self):
+                        self.L = logging.getLogger(__name__)
+                    def m(self):
+                        self.L.warning(
+                            _('text'), {}
+                        )
+                        self.L.warning(
+                            _LW('text'), {}
+                        )
+            """,
+            'expected_errors': [
+                (7, 12, 'K006'),
+            ],
+        },
+        {
+            'code': """
+                # oslo logging and specifying a logger
+                L = log.getLogger(__name__)
+                L.error(oslo_i18n('text'))
+                L.error(error_hint('text'))
+            """,
+            'expected_errors': [
+                (3, 8, 'K006'),
+            ],
+        },
+        {
+            'code': """
+                # oslo logging w/ alias
+                class C:
+                    def __init__(self):
+                        self.LOG = oslo_logging.getLogger()
+                        self.LOG.critical(_('text'))
+                        self.LOG.critical(_LC('text'))
+            """,
+            'expected_errors': [
+                (5, 26, 'K006'),
+            ],
+        },
+        {
+            'code': """
+                LOG = log.getLogger(__name__)
+                # translation on a separate line
+                msg = _('text')
+                LOG.exception(msg)
+                msg = _LE('text')
+                LOG.exception(msg)
+            """,
+            'expected_errors': [
+                (4, 14, 'K006'),
+            ],
+        },
+        {
+            'code': """
+                LOG = logging.getLogger()
+
+                # ensure the correct helper is being used
+                LOG.warn(_LI('this should cause an error'))
+
+                # debug should not allow any helpers either
+                LOG.debug(_LI('this should cause an error'))
+            """,
+            'expected_errors': [
+                (4, 9, 'K006'),
+                (7, 10, 'K005'),
+            ],
+        },
+        {
+            'code': """
+                # this should not be an error
+                L = log.getLogger(__name__)
+                msg = _('text')
+                L.warn(msg)
+                raise Exception(msg)
+            """,
+            'expected_errors': [],
+        },
+        {
+            'code': """
+                L = log.getLogger(__name__)
+                def f():
+                    msg = _('text')
+                    L2.warn(msg)
+                    something = True  # add an extra statement here
+                    raise Exception(msg)
+            """,
+            'expected_errors': [],
+        },
+        {
+            'code': """
+                LOG = log.getLogger(__name__)
+                def func():
+                    msg = _('text')
+                    LOG.warn(msg)
+                    raise Exception('some other message')
+            """,
+            'expected_errors': [
+                (4, 13, 'K006'),
+            ],
+        },
+        {
+            'code': """
+                LOG = log.getLogger(__name__)
+                if True:
+                    msg = _('text')
+                else:
+                    msg = _('text')
+                LOG.warn(msg)
+                raise Exception(msg)
+            """,
+            'expected_errors': [
+            ],
+        },
+        {
+            'code': """
+                LOG = log.getLogger(__name__)
+                if True:
+                    msg = _('text')
+                else:
+                    msg = _('text')
+                LOG.warn(msg)
+            """,
+            'expected_errors': [
+                (6, 9, 'K006'),
+            ],
+        },
+        {
+            'code': """
+                LOG = log.getLogger(__name__)
+                msg = _LW('text')
+                LOG.warn(msg)
+                raise Exception(msg)
+            """,
+            'expected_errors': [
+                (3, 9, 'K007'),
+            ],
+        },
+        {
+            'code': """
+                LOG = log.getLogger(__name__)
+                msg = _LW('text')
+                LOG.warn(msg)
+                msg = _('something else')
+                raise Exception(msg)
+            """,
+            'expected_errors': [],
+        },
+        {
+            'code': """
+                LOG = log.getLogger(__name__)
+                msg = _LW('hello %s') % 'world'
+                LOG.warn(msg)
+                raise Exception(msg)
+            """,
+            'expected_errors': [
+                (3, 9, 'K007'),
+            ],
+        },
+        {
+            'code': """
+                LOG = log.getLogger(__name__)
+                msg = _LW('hello %s') % 'world'
+                LOG.warn(msg)
+            """,
+            'expected_errors': [],
+        },
+        {
+            'code': """
+                # this should not be an error
+                LOG = log.getLogger(__name__)
+                try:
+                    something = True
+                except AssertionError as e:
+                    LOG.warning(six.text_type(e))
+                    raise exception.Unauthorized(e)
+            """,
+            'expected_errors': [],
+        },
+    ]
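+    # NOTE: as the fixtures above suggest, each 'expected_errors' entry is
+    # a (line, column, check_id) tuple: the 1-based line within the
+    # dedented 'code' snippet (the leading blank line appears to be
+    # stripped), the 0-based column where the offending translation call
+    # starts, and the keystone hacking check expected to fire. For example,
+    # in the fixture expecting (4, 9, 'K006') and (7, 10, 'K005'), line 4
+    # is LOG.warn(_LI(...)) with _LI starting at column 9, and line 7 is
+    # LOG.debug(_LI(...)) with _LI starting at column 10.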
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/key_repository.py b/keystone-moon/keystone/tests/unit/ksfixtures/key_repository.py
new file mode 100644 (file)
index 0000000..d1ac2ab
--- /dev/null
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import shutil
+import tempfile
+
+import fixtures
+
+from keystone.token.providers.fernet import utils
+
+
+class KeyRepository(fixtures.Fixture):
+    def __init__(self, config_fixture):
+        super(KeyRepository, self).__init__()
+        self.config_fixture = config_fixture
+
+    def setUp(self):
+        super(KeyRepository, self).setUp()
+        directory = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, directory)
+        self.config_fixture.config(group='fernet_tokens',
+                                   key_repository=directory)
+
+        utils.create_key_directory()
+        utils.initialize_key_repository()
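+
+# NOTE: a minimal usage sketch (hypothetical test code; assumes the test
+# case already carries an oslo.config fixture as self.config_fixture):
+#
+#     from keystone.tests.unit.ksfixtures import key_repository
+#
+#     def setUp(self):
+#         super(FernetTokenTests, self).setUp()
+#         # repoints [fernet_tokens] key_repository at a throwaway
+#         # directory and populates it with fernet keys
+#         self.useFixture(key_repository.KeyRepository(self.config_fixture))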
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/temporaryfile.py b/keystone-moon/keystone/tests/unit/ksfixtures/temporaryfile.py
new file mode 100644 (file)
index 0000000..a4be06f
--- /dev/null
@@ -0,0 +1,29 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+
+import fixtures
+
+
+class SecureTempFile(fixtures.Fixture):
+    """A fixture for creating a secure temp file."""
+
+    def setUp(self):
+        super(SecureTempFile, self).setUp()
+
+        _fd, self.file_name = tempfile.mkstemp()
+        # Make sure no file descriptors are leaked, close the unused FD.
+        os.close(_fd)
+        self.addCleanup(os.remove, self.file_name)
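+
+# NOTE: a usage sketch (hypothetical test method). The fixture hands back
+# a path created via mkstemp() and removes it automatically on cleanup:
+#
+#     from keystone.tests.unit.ksfixtures import temporaryfile
+#
+#     def test_something(self):
+#         tmpfile = self.useFixture(temporaryfile.SecureTempFile())
+#         with open(tmpfile.file_name, 'w') as f:
+#             f.write('scratch data')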
diff --git a/keystone-moon/keystone/tests/unit/mapping_fixtures.py b/keystone-moon/keystone/tests/unit/mapping_fixtures.py
new file mode 100644 (file)
index 0000000..0892ada
--- /dev/null
@@ -0,0 +1,1023 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Fixtures for Federation Mapping."""
+
+EMPLOYEE_GROUP_ID = "0cd5e9"
+CONTRACTOR_GROUP_ID = "85a868"
+TESTER_GROUP_ID = "123"
+TESTER_GROUP_NAME = "tester"
+DEVELOPER_GROUP_ID = "xyz"
+DEVELOPER_GROUP_NAME = "Developer"
+CONTRACTOR_GROUP_NAME = "Contractor"
+DEVELOPER_GROUP_DOMAIN_NAME = "outsourcing"
+DEVELOPER_GROUP_DOMAIN_ID = "5abc43"
+FEDERATED_DOMAIN = "Federated"
+LOCAL_DOMAIN = "Local"
+
+# Mapping summary:
+# LastName Bo & not Contractor or SubContractor -> group 0cd5e9 (Employee)
+# FirstName Jill & Contractor or SubContractor -> group 85a868 (Contractor)
+MAPPING_SMALL = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "group": {
+                        "id": EMPLOYEE_GROUP_ID
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{0}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "not_any_of": [
+                        "Contractor",
+                        "SubContractor"
+                    ]
+                },
+                {
+                    "type": "LastName",
+                    "any_one_of": [
+                        "Bo"
+                    ]
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "group": {
+                        "id": CONTRACTOR_GROUP_ID
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{0}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        "Contractor",
+                        "SubContractor"
+                    ]
+                },
+                {
+                    "type": "FirstName",
+                    "any_one_of": [
+                        "Jill"
+                    ]
+                }
+            ]
+        }
+    ]
+}
+
+# Mapping summary:
+# orgPersonType Admin or Big Cheese -> name {0} {1}, email {2}, group 0cd5e9
+# orgPersonType not Admin/Employee/Contractor/Tester -> name {0}, email {1}
+# orgPersonType Tester and email .*@example.com$ -> groups 123 and xyz
+MAPPING_LARGE = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0} {1}",
+                        "email": "{2}"
+                    },
+                    "group": {
+                        "id": EMPLOYEE_GROUP_ID
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "FirstName"
+                },
+                {
+                    "type": "LastName"
+                },
+                {
+                    "type": "Email"
+                },
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        "Admin",
+                        "Big Cheese"
+                    ]
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}",
+                        "email": "{1}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "Email"
+                },
+                {
+                    "type": "orgPersonType",
+                    "not_any_of": [
+                        "Admin",
+                        "Employee",
+                        "Contractor",
+                        "Tester"
+                    ]
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "group": {
+                        "id": TESTER_GROUP_ID
+                    }
+                },
+                {
+                    "group": {
+                        "id": DEVELOPER_GROUP_ID
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{0}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        "Tester"
+                    ]
+                },
+                {
+                    "type": "Email",
+                    "any_one_of": [
+                        ".*@example.com$"
+                    ],
+                    "regex": True
+                }
+            ]
+        }
+    ]
+}
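+
+# NOTE: in the 'local' rules above, "{0}", "{1}", ... are positional
+# placeholders. They are filled, in order, from the 'remote' entries of
+# the same rule that only name a "type"; entries carrying any_one_of or
+# not_any_of act as conditions and contribute no value. In the first
+# MAPPING_LARGE rule, for instance, "{0} {1}" becomes
+# "<FirstName> <LastName>" and "{2}" becomes "<Email>".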
+
+MAPPING_BAD_REQ = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": "name"
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName",
+                    "bad_requirement": [
+                        "Young"
+                    ]
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_BAD_VALUE = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": "name"
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName",
+                    "any_one_of": "should_be_list"
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_NO_RULES = {
+    'rules': []
+}
+
+MAPPING_NO_REMOTE = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": "name"
+                }
+            ],
+            "remote": []
+        }
+    ]
+}
+
+MAPPING_MISSING_LOCAL = {
+    "rules": [
+        {
+            "remote": [
+                {
+                    "type": "UserName",
+                    "any_one_of": "should_be_list"
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_WRONG_TYPE = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": "{1}"
+                }
+            ],
+            "remote": [
+                {
+                    "not_type": "UserName"
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_MISSING_TYPE = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": "{1}"
+                }
+            ],
+            "remote": [
+                {}
+            ]
+        }
+    ]
+}
+
+MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "group": {
+                        "id": "0cd5e9"
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{0}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "not_any_of": [
+                        "SubContractor"
+                    ],
+                    "invalid_type": "xyz"
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "group": {
+                        "id": "0cd5e9"
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{0}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        "SubContractor"
+                    ],
+                    "invalid_type": "xyz"
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "group": {
+                        "id": "0cd5e9"
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{0}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "invalid_type": "xyz"
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_EXTRA_RULES_PROPS = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "group": {
+                        "id": "0cd5e9"
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{0}"
+                    }
+                }
+            ],
+            "invalid_type": {
+                "id": "xyz",
+            },
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "not_any_of": [
+                        "SubContractor"
+                    ]
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_TESTER_REGEX = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}",
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "group": {
+                        "id": TESTER_GROUP_ID
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        ".*Tester*"
+                    ],
+                    "regex": True
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_DEVELOPER_REGEX = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}",
+                    },
+                    "group": {
+                        "id": DEVELOPER_GROUP_ID
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        "Developer"
+                    ],
+                },
+                {
+                    "type": "Email",
+                    "not_any_of": [
+                        ".*@example.org$"
+                    ],
+                    "regex": True
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_GROUP_NAMES = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}",
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "group": {
+                        "name": DEVELOPER_GROUP_NAME,
+                        "domain": {
+                            "name": DEVELOPER_GROUP_DOMAIN_NAME
+                        }
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        "Employee"
+                    ],
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "group": {
+                        "name": TESTER_GROUP_NAME,
+                        "domain": {
+                            "id": DEVELOPER_GROUP_DOMAIN_ID
+                        }
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "any_one_of": [
+                        "BuildingX"
+                    ]
+                }
+            ]
+        },
+    ]
+}
+
+MAPPING_EPHEMERAL_USER = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}",
+                        "domain": {
+                            "id": FEDERATED_DOMAIN
+                        },
+                        "type": "ephemeral"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "UserName",
+                    "any_one_of": [
+                        "tbo"
+                    ]
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_GROUPS_WHITELIST = {
+    "rules": [
+        {
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "whitelist": [
+                        "Developer", "Contractor"
+                    ]
+                },
+                {
+                    "type": "UserName"
+                }
+            ],
+            "local": [
+                {
+                    "groups": "{0}",
+                    "domain": {
+                        "id": DEVELOPER_GROUP_DOMAIN_ID
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{1}"
+                    }
+                }
+            ]
+        }
+    ]
+}
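+
+# NOTE: with a whitelist, the multivalued remote attribute is filtered
+# down to the listed values and the survivors are bound to the "groups"
+# placeholder as a list. Given orgPersonType 'Developer;Manager;Contractor'
+# (see EMPLOYEE_ASSERTION_MULTIPLE_GROUPS below), "{0}" here maps to
+# ['Developer', 'Contractor'] and 'Manager' is dropped. A blacklist
+# (MAPPING_GROUPS_BLACKLIST below) inverts this, keeping everything
+# except the listed values.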
+
+MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}",
+                        "domain": {
+                            "id": LOCAL_DOMAIN
+                        },
+                        "type": "ephemeral"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "UserName",
+                    "any_one_of": [
+                        "jsmith"
+                    ]
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN = {
+    "rules": [
+        {
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "whitelist": [
+                        "Developer", "Contractor"
+                    ]
+                },
+            ],
+            "local": [
+                {
+                    "groups": "{0}",
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_LOCAL_USER_LOCAL_DOMAIN = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}",
+                        "domain": {
+                            "id": LOCAL_DOMAIN
+                        },
+                        "type": "local"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "UserName",
+                    "any_one_of": [
+                        "jsmith"
+                    ]
+                }
+            ]
+        }
+    ]
+}
+
+MAPPING_GROUPS_BLACKLIST_MULTIPLES = {
+    "rules": [
+        {
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "blacklist": [
+                        "Developer", "Manager"
+                    ]
+                },
+                {
+                    "type": "Thing"  # this could be variable length!
+                },
+                {
+                    "type": "UserName"
+                },
+            ],
+            "local": [
+                {
+                    "groups": "{0}",
+                    "domain": {
+                        "id": DEVELOPER_GROUP_DOMAIN_ID
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{2}",
+                    }
+                }
+            ]
+        }
+    ]
+}
+MAPPING_GROUPS_BLACKLIST = {
+    "rules": [
+        {
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "blacklist": [
+                        "Developer", "Manager"
+                    ]
+                },
+                {
+                    "type": "UserName"
+                }
+            ],
+            "local": [
+                {
+                    "groups": "{0}",
+                    "domain": {
+                        "id": DEVELOPER_GROUP_DOMAIN_ID
+                    }
+                },
+                {
+                    "user": {
+                        "name": "{1}"
+                    }
+                }
+            ]
+        }
+    ]
+}
+
+# Exercise all possibilities of user identification. Values are hardcoded on
+# purpose.
+MAPPING_USER_IDS = {
+    "rules": [
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "UserName",
+                    "any_one_of": [
+                        "jsmith"
+                    ]
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "user": {
+                        "name": "{0}",
+                        "domain": {
+                            "id": "federated"
+                        }
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "UserName",
+                    "any_one_of": [
+                        "tbo"
+                    ]
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "user": {
+                        "id": "{0}"
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "UserName",
+                    "any_one_of": [
+                        "bob"
+                    ]
+                }
+            ]
+        },
+        {
+            "local": [
+                {
+                    "user": {
+                        "id": "abc123",
+                        "name": "{0}",
+                        "domain": {
+                            "id": "federated"
+                        }
+                    }
+                }
+            ],
+            "remote": [
+                {
+                    "type": "UserName"
+                },
+                {
+                    "type": "UserName",
+                    "any_one_of": [
+                        "bwilliams"
+                    ]
+                }
+            ]
+        }
+    ]
+}
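+
+# NOTE: the four rules above exercise, in order: (1) a user identified by
+# name alone ('jsmith'), (2) a name plus an explicit domain ('tbo'),
+# (3) an id taken directly from the assertion ('bob'), and (4) a
+# hardcoded id combined with a mapped name and domain ('bwilliams').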
+
+MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN = {
+    "rules": [
+        {
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "blacklist": [
+                        "Developer", "Manager"
+                    ]
+                },
+            ],
+            "local": [
+                {
+                    "groups": "{0}",
+                },
+            ]
+        }
+    ]
+}
+
+MAPPING_GROUPS_WHITELIST_AND_BLACKLIST = {
+    "rules": [
+        {
+            "remote": [
+                {
+                    "type": "orgPersonType",
+                    "blacklist": [
+                        "Employee"
+                    ],
+                    "whitelist": [
+                        "Contractor"
+                    ]
+                },
+            ],
+            "local": [
+                {
+                    "groups": "{0}",
+                    "domain": {
+                        "id": DEVELOPER_GROUP_DOMAIN_ID
+                    }
+                },
+            ]
+        }
+    ]
+}
+
+EMPLOYEE_ASSERTION = {
+    'Email': 'tim@example.com',
+    'UserName': 'tbo',
+    'FirstName': 'Tim',
+    'LastName': 'Bo',
+    'orgPersonType': 'Employee;BuildingX'
+}
+
+EMPLOYEE_ASSERTION_MULTIPLE_GROUPS = {
+    'Email': 'tim@example.com',
+    'UserName': 'tbo',
+    'FirstName': 'Tim',
+    'LastName': 'Bo',
+    'orgPersonType': 'Developer;Manager;Contractor',
+    'Thing': 'yes!;maybe!;no!!'
+}
+
+EMPLOYEE_ASSERTION_PREFIXED = {
+    'PREFIX_Email': 'tim@example.com',
+    'PREFIX_UserName': 'tbo',
+    'PREFIX_FirstName': 'Tim',
+    'PREFIX_LastName': 'Bo',
+    'PREFIX_orgPersonType': 'SuperEmployee;BuildingX'
+}
+
+CONTRACTOR_ASSERTION = {
+    'Email': 'jill@example.com',
+    'UserName': 'jsmith',
+    'FirstName': 'Jill',
+    'LastName': 'Smith',
+    'orgPersonType': 'Contractor;Non-Dev'
+}
+
+ADMIN_ASSERTION = {
+    'Email': 'bob@example.com',
+    'UserName': 'bob',
+    'FirstName': 'Bob',
+    'LastName': 'Thompson',
+    'orgPersonType': 'Admin;Chief'
+}
+
+CUSTOMER_ASSERTION = {
+    'Email': 'beth@example.com',
+    'UserName': 'bwilliams',
+    'FirstName': 'Beth',
+    'LastName': 'Williams',
+    'orgPersonType': 'Customer'
+}
+
+ANOTHER_CUSTOMER_ASSERTION = {
+    'Email': 'mark@example.com',
+    'UserName': 'markcol',
+    'FirstName': 'Mark',
+    'LastName': 'Collins',
+    'orgPersonType': 'Managers;CEO;CTO'
+}
+
+TESTER_ASSERTION = {
+    'Email': 'testacct@example.com',
+    'UserName': 'testacct',
+    'FirstName': 'Test',
+    'LastName': 'Account',
+    'orgPersonType': 'MadeupGroup;Tester;GroupX'
+}
+
+ANOTHER_TESTER_ASSERTION = {
+    'UserName': 'IamTester'
+}
+
+BAD_TESTER_ASSERTION = {
+    'Email': 'eviltester@example.org',
+    'UserName': 'Evil',
+    'FirstName': 'Test',
+    'LastName': 'Account',
+    'orgPersonType': 'Tester'
+}
+
+BAD_DEVELOPER_ASSERTION = {
+    'Email': 'evildeveloper@example.org',
+    'UserName': 'Evil',
+    'FirstName': 'Develop',
+    'LastName': 'Account',
+    'orgPersonType': 'Developer'
+}
+
+MALFORMED_TESTER_ASSERTION = {
+    'Email': 'testacct@example.com',
+    'UserName': 'testacct',
+    'FirstName': 'Test',
+    'LastName': 'Account',
+    'orgPersonType': 'Tester',
+    'object': object(),
+    'dictionary': dict(zip('teststring', xrange(10))),
+    'tuple': tuple(xrange(5))
+}
+
+DEVELOPER_ASSERTION = {
+    'Email': 'developacct@example.com',
+    'UserName': 'developacct',
+    'FirstName': 'Develop',
+    'LastName': 'Account',
+    'orgPersonType': 'Developer'
+}
+
+CONTRACTOR_MALFORMED_ASSERTION = {
+    'UserName': 'user',
+    'FirstName': object(),
+    'orgPersonType': 'Contractor'
+}
+
+LOCAL_USER_ASSERTION = {
+    'UserName': 'marek',
+    'UserType': 'random'
+}
+
+ANOTHER_LOCAL_USER_ASSERTION = {
+    'UserName': 'marek',
+    'Position': 'DirectorGeneral'
+}
+
+UNMATCHED_GROUP_ASSERTION = {
+    'REMOTE_USER': 'Any Momoose',
+    'REMOTE_USER_GROUPS': 'EXISTS;NO_EXISTS'
+}
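+
+# NOTE: a minimal sketch of pushing an assertion through one of these
+# mappings, assuming the RuleProcessor rules/process interface used by
+# the federation tests in this tree:
+#
+#     from keystone.contrib.federation import utils as mapping_utils
+#
+#     rp = mapping_utils.RuleProcessor(MAPPING_SMALL['rules'])
+#     mapped = rp.process(EMPLOYEE_ASSERTION)
+#     # EMPLOYEE_ASSERTION has LastName 'Bo' and orgPersonType
+#     # 'Employee;BuildingX', so the first MAPPING_SMALL rule matches:
+#     # mapped['group_ids'] contains EMPLOYEE_GROUP_ID and the mapped
+#     # user name is 'tbo'.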
diff --git a/keystone-moon/keystone/tests/unit/rest.py b/keystone-moon/keystone/tests/unit/rest.py
new file mode 100644 (file)
index 0000000..1651302
--- /dev/null
@@ -0,0 +1,245 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils
+import six
+import webtest
+
+from keystone.auth import controllers as auth_controllers
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+
+
+class RestfulTestCase(tests.TestCase):
+    """Performs restful tests against the WSGI app over HTTP.
+
+    This class launches public & admin WSGI servers for every test, which can
+    be accessed by calling ``public_request()`` or ``admin_request()``,
+    respectively.
+
+    ``restful_request()`` and ``request()`` methods are also exposed if you
+    need to bypass restful conventions or access HTTP details in your test
+    implementation.
+
+    Three new asserts are provided:
+
+    * ``assertResponseSuccessful``: called automatically for every request
+        unless an ``expected_status`` is provided
+    * ``assertResponseStatus``: called instead of ``assertResponseSuccessful``,
+        if an ``expected_status`` is provided
+    * ``assertValidResponseHeaders``: validates that the response headers
+        appear as expected
+
+    Requests are automatically serialized according to the defined
+    ``content_type``. Responses are automatically deserialized as well, and
+    available in the ``response.body`` attribute. The original body content is
+    available in the ``response.raw`` attribute.
+
+    """
+
+    # default content type to test
+    content_type = 'json'
+
+    def get_extensions(self):
+        return None
+
+    def setUp(self, app_conf='keystone'):
+        super(RestfulTestCase, self).setUp()
+
+        # Will need to reset the plug-ins
+        self.addCleanup(setattr, auth_controllers, 'AUTH_METHODS', {})
+
+        self.useFixture(database.Database(extensions=self.get_extensions()))
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        self.public_app = webtest.TestApp(
+            self.loadapp(app_conf, name='main'))
+        self.addCleanup(delattr, self, 'public_app')
+        self.admin_app = webtest.TestApp(
+            self.loadapp(app_conf, name='admin'))
+        self.addCleanup(delattr, self, 'admin_app')
+
+    def request(self, app, path, body=None, headers=None, token=None,
+                expected_status=None, **kwargs):
+        if headers:
+            headers = {str(k): str(v) for k, v in six.iteritems(headers)}
+        else:
+            headers = {}
+
+        if token:
+            headers['X-Auth-Token'] = str(token)
+
+        # sets environ['REMOTE_ADDR']
+        kwargs.setdefault('remote_addr', 'localhost')
+
+        response = app.request(path, headers=headers,
+                               status=expected_status, body=body,
+                               **kwargs)
+
+        return response
+
+    def assertResponseSuccessful(self, response):
+        """Asserts that a status code lies inside the 2xx range.
+
+        :param response: :py:class:`httplib.HTTPResponse` to be
+          verified to have a status code between 200 and 299.
+
+        example::
+
+             self.assertResponseSuccessful(response)
+        """
+        self.assertTrue(
+            response.status_code >= 200 and response.status_code <= 299,
+            'Status code %d is outside of the expected range (2xx)\n\n%s' %
+            (response.status_code, response.body))
+
+    def assertResponseStatus(self, response, expected_status):
+        """Asserts a specific status code on the response.
+
+        :param response: :py:class:`httplib.HTTPResponse`
+        :param expected_status: The specific ``status`` result expected
+
+        example::
+
+            self.assertResponseStatus(response, 204)
+        """
+        self.assertEqual(
+            response.status_code,
+            expected_status,
+            'Status code %s is not %s, as expected\n\n%s' %
+            (response.status_code, expected_status, response.body))
+
+    def assertValidResponseHeaders(self, response):
+        """Ensures that response headers appear as expected."""
+        self.assertIn('X-Auth-Token', response.headers.get('Vary'))
+
+    def assertValidErrorResponse(self, response, expected_status=400):
+        """Verify that the error response is valid.
+
+        Subclasses can override this function based on the expected response.
+
+        """
+        self.assertEqual(response.status_code, expected_status)
+        error = response.result['error']
+        self.assertEqual(error['code'], response.status_code)
+        self.assertIsNotNone(error.get('title'))
+
+    def _to_content_type(self, body, headers, content_type=None):
+        """Attempt to encode JSON and XML automatically."""
+        content_type = content_type or self.content_type
+
+        if content_type == 'json':
+            headers['Accept'] = 'application/json'
+            if body:
+                headers['Content-Type'] = 'application/json'
+                return jsonutils.dumps(body)
+
+    def _from_content_type(self, response, content_type=None):
+        """Attempt to decode JSON and XML automatically, if detected."""
+        content_type = content_type or self.content_type
+
+        if response.body is not None and response.body.strip():
+            # if a body is provided, a Content-Type is also expected
+            header = response.headers.get('Content-Type')
+            self.assertIn(content_type, header)
+
+            if content_type == 'json':
+                response.result = jsonutils.loads(response.body)
+            else:
+                response.result = response.body
+
+    def restful_request(self, method='GET', headers=None, body=None,
+                        content_type=None, response_content_type=None,
+                        **kwargs):
+        """Serializes/deserializes json as request/response body.
+
+        .. WARNING::
+
+            * Existing Accept header will be overwritten.
+            * Existing Content-Type header will be overwritten.
+
+        """
+        # Initialize headers dictionary
+        headers = {} if not headers else headers
+
+        body = self._to_content_type(body, headers, content_type)
+
+        # Perform the HTTP request/response
+        response = self.request(method=method, headers=headers, body=body,
+                                **kwargs)
+
+        response_content_type = response_content_type or content_type
+        self._from_content_type(response, content_type=response_content_type)
+
+        # we can save some code & improve coverage by always doing this
+        if method != 'HEAD' and response.status_code >= 400:
+            self.assertValidErrorResponse(response)
+
+        # Contains the decoded response.body
+        return response
+
+    def _request(self, convert=True, **kwargs):
+        if convert:
+            response = self.restful_request(**kwargs)
+        else:
+            response = self.request(**kwargs)
+
+        self.assertValidResponseHeaders(response)
+        return response
+
+    def public_request(self, **kwargs):
+        return self._request(app=self.public_app, **kwargs)
+
+    def admin_request(self, **kwargs):
+        return self._request(app=self.admin_app, **kwargs)
+
+    def _get_token(self, body):
+        """Convenience method so that we can test authenticated requests."""
+        r = self.public_request(method='POST', path='/v2.0/tokens', body=body)
+        return self._get_token_id(r)
+
+    def get_unscoped_token(self):
+        """Convenience method so that we can test authenticated requests."""
+        return self._get_token({
+            'auth': {
+                'passwordCredentials': {
+                    'username': self.user_foo['name'],
+                    'password': self.user_foo['password'],
+                },
+            },
+        })
+
+    def get_scoped_token(self, tenant_id=None):
+        """Convenience method so that we can test authenticated requests."""
+        if not tenant_id:
+            tenant_id = self.tenant_bar['id']
+        return self._get_token({
+            'auth': {
+                'passwordCredentials': {
+                    'username': self.user_foo['name'],
+                    'password': self.user_foo['password'],
+                },
+                'tenantId': tenant_id,
+            },
+        })
+
+    def _get_token_id(self, r):
+        """Helper method to return a token ID from a response.
+
+        This needs to be overridden by child classes, based on their content
+        type.
+
+        """
+        raise NotImplementedError()
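+
+# NOTE: a sketch of the override a v2 JSON-based child class might
+# provide for _get_token_id (hypothetical; the real subclasses live in
+# the v2/v3 REST test modules):
+#
+#     class V2RestfulTestCase(RestfulTestCase):
+#         def _get_token_id(self, r):
+#             return r.result['access']['token']['id']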
diff --git a/keystone-moon/keystone/tests/unit/saml2/idp_saml2_metadata.xml b/keystone-moon/keystone/tests/unit/saml2/idp_saml2_metadata.xml
new file mode 100644 (file)
index 0000000..db235f7
--- /dev/null
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ns0:EntityDescriptor xmlns:ns0="urn:oasis:names:tc:SAML:2.0:metadata" xmlns:ns1="http://www.w3.org/2000/09/xmldsig#" entityID="k2k.com/v3/OS-FEDERATION/idp" validUntil="2014-08-19T21:24:17.411289Z">
+  <ns0:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
+    <ns0:KeyDescriptor use="signing">
+      <ns1:KeyInfo>
+        <ns1:X509Data>
+          <ns1:X509Certificate>MIIDpTCCAo0CAREwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0xMzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgY8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVuc3RhY2sub3JnMREwDwYDVQQDEwhLZXlzdG9uZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMTC6IdNd9Cg1DshcrT5gRVRF36nEmjSA9QWdik7B925PK70U4F6j4pz/5JL7plIo/8rJ4jJz9ccE7m0iA+IuABtEhEwXkG9rj47Oy0J4ZyDGSh2K1Bl78PA9zxXSzysUTSjBKdAh29dPYbJY7cgZJ0uC3AtfVceYiAOIi14SdFeZ0LZLDXBuLaqUmSMrmKwJ9wAMOCb/jbBP9/3Ycd0GYjlvrSBU4Bqb8/NHasyO4DpPN68OAoyD5r5jUtV8QZN03UjIsoux8e0lrL6+MVtJo0OfWvlSrlzS5HKSryY+uqqQEuxtZKpJM2MV85ujvjc8eDSChh2shhDjBem3FIlHKUCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAed9fHgdJrk+gZcO5gsqq6uURfDOuYD66GsSdZw4BqHjYAcnyWq2da+iw7Uxkqu7iLf2k4+Hu3xjDFrce479OwZkSnbXmqB7XspTGOuM8MgT7jB/ypKTOZ6qaZKSWK1Hta995hMrVVlhUNBLh0MPGqoVWYA4d7mblujgH9vp+4mpCciJagHks8K5FBmI+pobB+uFdSYDoRzX9LTpStspK4e3IoY8baILuGcdKimRNBv6ItG4hMrntAe1/nWMJyUu5rDTGf2V/vAaS0S/faJBwQSz1o38QHMTWHNspfwIdX3yMqI9u7/vYlz3rLy5WdBdUgZrZ3/VLmJTiJVZu5Owq4Q==
+</ns1:X509Certificate>
+        </ns1:X509Data>
+      </ns1:KeyInfo>
+    </ns0:KeyDescriptor>
+  </ns0:IDPSSODescriptor>
+  <ns0:Organization>
+    <ns0:OrganizationName xml:lang="en">openstack</ns0:OrganizationName>
+    <ns0:OrganizationDisplayName xml:lang="en">openstack</ns0:OrganizationDisplayName>
+    <ns0:OrganizationURL xml:lang="en">openstack</ns0:OrganizationURL>
+  </ns0:Organization>
+  <ns0:ContactPerson contactType="technical">
+    <ns0:Company>openstack</ns0:Company>
+    <ns0:GivenName>first</ns0:GivenName>
+    <ns0:SurName>lastname</ns0:SurName>
+    <ns0:EmailAddress>admin@example.com</ns0:EmailAddress>
+    <ns0:TelephoneNumber>555-555-5555</ns0:TelephoneNumber>
+  </ns0:ContactPerson>
+</ns0:EntityDescriptor>
diff --git a/keystone-moon/keystone/tests/unit/saml2/signed_saml2_assertion.xml b/keystone-moon/keystone/tests/unit/saml2/signed_saml2_assertion.xml
new file mode 100644 (file)
index 0000000..410f938
--- /dev/null
@@ -0,0 +1,63 @@
+<ns0:Assertion xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:ns1="http://www.w3.org/2000/09/xmldsig#" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ID="9a22528bfe194b2880edce5d60414d6a" IssueInstant="2014-08-19T10:53:57Z" Version="2.0">
+  <ns0:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">https://acme.com/FIM/sps/openstack/saml20</ns0:Issuer>
+  <ns1:Signature>
+    <ns1:SignedInfo>
+      <ns1:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#" />
+      <ns1:SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1" />
+      <ns1:Reference URI="#9a22528bfe194b2880edce5d60414d6a">
+        <ns1:Transforms>
+          <ns1:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />
+          <ns1:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#" />
+        </ns1:Transforms>
+        <ns1:DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1" />
+        <ns1:DigestValue>Lem2TKyYt+/tJy2iSos1t0KxcJE=</ns1:DigestValue>
+      </ns1:Reference>
+    </ns1:SignedInfo>
+    <ns1:SignatureValue>b//GXtGeCIJPFsMAHrx4+3yjrL4smSpRLXG9PB3TLMJvU4fx8n2PzK7+VbtWNbZG
+vSgbvbQR52jq77iyaRfQ2iELuFEY+YietLRi7hsitkJCEayPmU+BDlNIGuCXZjAy
+7tmtGFkLlZZJaom1jAzHfZ5JPjZdM5hvQwrhCI2Kzyk=</ns1:SignatureValue>
+    <ns1:KeyInfo>
+      <ns1:X509Data>
+        <ns1:X509Certificate>MIICtjCCAh+gAwIBAgIJAJTeBUN2i9ZNMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNV
+BAYTAkhSMQ8wDQYDVQQIEwZaYWdyZWIxITAfBgNVBAoTGE5la2Egb3JnYW5pemFj
+aWphIGQuby5vLjELMAkGA1UEAxMCQ0EwHhcNMTIxMjI4MTYwODA1WhcNMTQxMjI4
+MTYwODA1WjBvMQswCQYDVQQGEwJIUjEPMA0GA1UECBMGWmFncmViMQ8wDQYDVQQH
+EwZaYWdyZWIxITAfBgNVBAoTGE5la2Egb3JnYW5pemFjaWphIGQuby5vLjEbMBkG
+A1UEAxMSUHJvZ3JhbWVyc2thIGZpcm1hMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
+iQKBgQCgWApHV5cma0GY/v/vmwgciDQBgITcitx2rG0F+ghXtGiEJeK75VY7jQwE
+UFCbgV+AaOY2NQChK2FKec7Hss/5y+jbWfX2yVwX6TYcCwnOGXenz+cgx2Fwqpu3
+ncL6dYJMfdbKvojBaJQLJTaNjRJsZACButDsDtXDSH9QaRy+hQIDAQABo3sweTAJ
+BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0
+aWZpY2F0ZTAdBgNVHQ4EFgQUSo9ThP/MOg8QIRWxoPo8qKR8O2wwHwYDVR0jBBgw
+FoAUAelckr4bx8MwZ7y+VlHE46Mbo+cwDQYJKoZIhvcNAQEFBQADgYEAy19Z7Z5/
+/MlWkogu41s0RxL9ffG60QQ0Y8hhDTmgHNx1itj0wT8pB7M4KVMbZ4hjjSFsfRq4
+Vj7jm6LwU0WtZ3HGl8TygTh8AAJvbLROnTjLL5MqI9d9pKvIIfZ2Qs3xmJ7JEv4H
+UHeBXxQq/GmfBv3l+V5ObQ+EHKnyDodLHCk=</ns1:X509Certificate>
+      </ns1:X509Data>
+    </ns1:KeyInfo>
+  </ns1:Signature>
+  <ns0:Subject>
+    <ns0:NameID Format="urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress">test_user</ns0:NameID>
+    <ns0:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
+      <ns0:SubjectConfirmationData NotOnOrAfter="2014-08-19T11:53:57.243106Z" Recipient="http://beta.com/Shibboleth.sso/SAML2/POST" />
+    </ns0:SubjectConfirmation>
+  </ns0:Subject>
+  <ns0:AuthnStatement AuthnInstant="2014-08-19T10:53:57Z" SessionIndex="4e3430a9f8b941e69c133293a7a960a1" SessionNotOnOrAfter="2014-08-19T11:53:57.243106Z">
+    <ns0:AuthnContext>
+      <ns0:AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:Password</ns0:AuthnContextClassRef>
+      <ns0:AuthenticatingAuthority>https://acme.com/FIM/sps/openstack/saml20</ns0:AuthenticatingAuthority>
+    </ns0:AuthnContext>
+  </ns0:AuthnStatement>
+  <ns0:AttributeStatement>
+    <ns0:Attribute FriendlyName="keystone_user" Name="user" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
+      <ns0:AttributeValue xsi:type="xs:string">test_user</ns0:AttributeValue>
+    </ns0:Attribute>
+    <ns0:Attribute FriendlyName="keystone_roles" Name="roles" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
+      <ns0:AttributeValue xsi:type="xs:string">admin</ns0:AttributeValue>
+      <ns0:AttributeValue xsi:type="xs:string">member</ns0:AttributeValue>
+    </ns0:Attribute>
+    <ns0:Attribute FriendlyName="keystone_project" Name="project" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
+      <ns0:AttributeValue xsi:type="xs:string">development</ns0:AttributeValue>
+    </ns0:Attribute>
+  </ns0:AttributeStatement>
+</ns0:Assertion>
diff --git a/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py b/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py
new file mode 100644 (file)
index 0000000..e0159b7
--- /dev/null
@@ -0,0 +1,1129 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from testtools import matchers
+
+# NOTE(morganfainberg): import endpoint filter to populate the SQL model
+from keystone.contrib import endpoint_filter  # noqa
+from keystone.tests.unit import test_v3
+
+
+class TestExtensionCase(test_v3.RestfulTestCase):
+
+    EXTENSION_NAME = 'endpoint_filter'
+    EXTENSION_TO_ADD = 'endpoint_filter_extension'
+
+    def config_overrides(self):
+        super(TestExtensionCase, self).config_overrides()
+        self.config_fixture.config(
+            group='catalog',
+            driver='keystone.contrib.endpoint_filter.backends.catalog_sql.'
+                   'EndpointFilterCatalog')
+
+    def setUp(self):
+        super(TestExtensionCase, self).setUp()
+        self.default_request_url = (
+            '/OS-EP-FILTER/projects/%(project_id)s'
+            '/endpoints/%(endpoint_id)s' % {
+                'project_id': self.default_domain_project_id,
+                'endpoint_id': self.endpoint_id})
+
+
+class EndpointFilterCRUDTestCase(TestExtensionCase):
+
+    def test_create_endpoint_project_association(self):
+        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Valid endpoint and project id test case.
+
+        """
+        self.put(self.default_request_url,
+                 body='',
+                 expected_status=204)
+
+    def test_create_endpoint_project_association_with_invalid_project(self):
+        """PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Invalid project id test case.
+
+        """
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': uuid.uuid4().hex,
+                     'endpoint_id': self.endpoint_id},
+                 body='',
+                 expected_status=404)
+
+    def test_create_endpoint_project_association_with_invalid_endpoint(self):
+        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Invalid endpoint id test case.
+
+        """
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.default_domain_project_id,
+                     'endpoint_id': uuid.uuid4().hex},
+                 body='',
+                 expected_status=404)
+
+    def test_create_endpoint_project_association_with_unexpected_body(self):
+        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Unexpected body in request. The body should be ignored.
+
+        """
+        self.put(self.default_request_url,
+                 body={'project_id': self.default_domain_project_id},
+                 expected_status=204)
+
+    def test_check_endpoint_project_association(self):
+        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Valid project and endpoint id test case.
+
+        """
+        self.put(self.default_request_url,
+                 body='',
+                 expected_status=204)
+        self.head('/OS-EP-FILTER/projects/%(project_id)s'
+                  '/endpoints/%(endpoint_id)s' % {
+                      'project_id': self.default_domain_project_id,
+                      'endpoint_id': self.endpoint_id},
+                  expected_status=204)
+
+    def test_check_endpoint_project_association_with_invalid_project(self):
+        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Invalid project id test case.
+
+        """
+        self.put(self.default_request_url)
+        self.head('/OS-EP-FILTER/projects/%(project_id)s'
+                  '/endpoints/%(endpoint_id)s' % {
+                      'project_id': uuid.uuid4().hex,
+                      'endpoint_id': self.endpoint_id},
+                  body='',
+                  expected_status=404)
+
+    def test_check_endpoint_project_association_with_invalid_endpoint(self):
+        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Invalid endpoint id test case.
+
+        """
+        self.put(self.default_request_url)
+        self.head('/OS-EP-FILTER/projects/%(project_id)s'
+                  '/endpoints/%(endpoint_id)s' % {
+                      'project_id': self.default_domain_project_id,
+                      'endpoint_id': uuid.uuid4().hex},
+                  body='',
+                  expected_status=404)
+
+    def test_list_endpoints_associated_with_valid_project(self):
+        """GET /OS-EP-FILTER/projects/{project_id}/endpoints
+
+        Valid project and endpoint id test case.
+
+        """
+        self.put(self.default_request_url)
+        resource_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
+                       'project_id': self.default_domain_project_id}
+        r = self.get(resource_url)
+        self.assertValidEndpointListResponse(r, self.endpoint,
+                                             resource_url=resource_url)
+
+    def test_list_endpoints_associated_with_invalid_project(self):
+        """GET /OS-EP-FILTER/projects/{project_id}/endpoints
+
+        Invalid project id test case.
+
+        """
+        self.put(self.default_request_url)
+        self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
+                 'project_id': uuid.uuid4().hex},
+                 body='',
+                 expected_status=404)
+
+    def test_list_projects_associated_with_endpoint(self):
+        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
+
+        Valid endpoint-project association test case.
+
+        """
+        self.put(self.default_request_url)
+        resource_url = '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {
+                       'endpoint_id': self.endpoint_id}
+        r = self.get(resource_url)
+        self.assertValidProjectListResponse(r, self.default_domain_project,
+                                            resource_url=resource_url)
+
+    def test_list_projects_with_no_endpoint_project_association(self):
+        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
+
+        Valid endpoint id but no endpoint-project associations test case.
+
+        """
+        r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
+                     {'endpoint_id': self.endpoint_id},
+                     expected_status=200)
+        self.assertValidProjectListResponse(r, expected_length=0)
+
+    def test_list_projects_associated_with_invalid_endpoint(self):
+        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
+
+        Invalid endpoint id test case.
+
+        """
+        self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
+                 {'endpoint_id': uuid.uuid4().hex},
+                 expected_status=404)
+
+    def test_remove_endpoint_project_association(self):
+        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Valid project id and endpoint id test case.
+
+        """
+        self.put(self.default_request_url)
+        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
+                    '/endpoints/%(endpoint_id)s' % {
+                        'project_id': self.default_domain_project_id,
+                        'endpoint_id': self.endpoint_id},
+                    expected_status=204)
+
+    def test_remove_endpoint_project_association_with_invalid_project(self):
+        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Invalid project id test case.
+
+        """
+        self.put(self.default_request_url)
+        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
+                    '/endpoints/%(endpoint_id)s' % {
+                        'project_id': uuid.uuid4().hex,
+                        'endpoint_id': self.endpoint_id},
+                    body='',
+                    expected_status=404)
+
+    def test_remove_endpoint_project_association_with_invalid_endpoint(self):
+        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+
+        Invalid endpoint id test case.
+
+        """
+        self.put(self.default_request_url)
+        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
+                    '/endpoints/%(endpoint_id)s' % {
+                        'project_id': self.default_domain_project_id,
+                        'endpoint_id': uuid.uuid4().hex},
+                    body='',
+                    expected_status=404)
+
+    def test_endpoint_project_association_cleanup_when_project_deleted(self):
+        self.put(self.default_request_url)
+        association_url = ('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
+                           {'endpoint_id': self.endpoint_id})
+        r = self.get(association_url, expected_status=200)
+        self.assertValidProjectListResponse(r, expected_length=1)
+
+        self.delete('/projects/%(project_id)s' % {
+            'project_id': self.default_domain_project_id})
+
+        r = self.get(association_url, expected_status=200)
+        self.assertValidProjectListResponse(r, expected_length=0)
+
+    def test_endpoint_project_association_cleanup_when_endpoint_deleted(self):
+        self.put(self.default_request_url)
+        association_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
+            'project_id': self.default_domain_project_id}
+        r = self.get(association_url, expected_status=200)
+        self.assertValidEndpointListResponse(r, expected_length=1)
+
+        self.delete('/endpoints/%(endpoint_id)s' % {
+            'endpoint_id': self.endpoint_id})
+
+        r = self.get(association_url, expected_status=200)
+        self.assertValidEndpointListResponse(r, expected_length=0)
+
+
+class EndpointFilterTokenRequestTestCase(TestExtensionCase):
+
+    def test_project_scoped_token_using_endpoint_filter(self):
+        """Verify endpoints from project scoped token filtered."""
+        # create a project to work with
+        ref = self.new_project_ref(domain_id=self.domain_id)
+        r = self.post('/projects', body={'project': ref})
+        project = self.assertValidProjectResponse(r, ref)
+
+        # grant the user a role on the project
+        self.put(
+            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
+                'user_id': self.user['id'],
+                'project_id': project['id'],
+                'role_id': self.role['id']})
+
+        # set the user's preferred project
+        body = {'user': {'default_project_id': project['id']}}
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': self.user['id']},
+            body=body)
+        self.assertValidUserResponse(r)
+
+        # add one endpoint to the project
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': project['id'],
+                     'endpoint_id': self.endpoint_id},
+                 body='',
+                 expected_status=204)
+
+        # attempt to authenticate without requesting a project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.post('/auth/tokens', body=auth_data)
+        self.assertValidProjectScopedTokenResponse(
+            r,
+            require_catalog=True,
+            endpoint_filter=True,
+            ep_filter_assoc=1)
+        self.assertEqual(r.result['token']['project']['id'], project['id'])
+
+    def test_default_scoped_token_using_endpoint_filter(self):
+        """Verify that endpoints in a default-scoped token are filtered."""
+        # add one endpoint to default project
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': self.endpoint_id},
+                 body='',
+                 expected_status=204)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.post('/auth/tokens', body=auth_data)
+        self.assertValidProjectScopedTokenResponse(
+            r,
+            require_catalog=True,
+            endpoint_filter=True,
+            ep_filter_assoc=1)
+        self.assertEqual(r.result['token']['project']['id'],
+                         self.project['id'])
+
+    def test_project_scoped_token_with_no_catalog_using_endpoint_filter(self):
+        """Verify endpoint filter when project scoped token returns no catalog.
+
+        Test that the project scoped token response is valid for a given
+        endpoint-project association when no service catalog is returned.
+
+        """
+        # create a project to work with
+        ref = self.new_project_ref(domain_id=self.domain_id)
+        r = self.post('/projects', body={'project': ref})
+        project = self.assertValidProjectResponse(r, ref)
+
+        # grant the user a role on the project
+        self.put(
+            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
+                'user_id': self.user['id'],
+                'project_id': project['id'],
+                'role_id': self.role['id']})
+
+        # set the user's preferred project
+        body = {'user': {'default_project_id': project['id']}}
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': self.user['id']},
+            body=body)
+        self.assertValidUserResponse(r)
+
+        # add one endpoint to the project
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': project['id'],
+                     'endpoint_id': self.endpoint_id},
+                 body='',
+                 expected_status=204)
+
+        # attempt to authenticate without requesting a project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
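+        # the '?nocatalog' query parameter asks keystone to omit the
+        # service catalog from the token response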
+        r = self.post('/auth/tokens?nocatalog', body=auth_data)
+        self.assertValidProjectScopedTokenResponse(
+            r,
+            require_catalog=False,
+            endpoint_filter=True,
+            ep_filter_assoc=1)
+        self.assertEqual(r.result['token']['project']['id'], project['id'])
+
+    def test_default_scoped_token_with_no_catalog_using_endpoint_filter(self):
+        """Verify endpoint filter when default scoped token returns no catalog.
+
+        Test that the default project scoped token response is valid for a
+        given endpoint-project association when no service catalog is returned.
+
+        """
+        # add one endpoint to default project
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': self.endpoint_id},
+                 body='',
+                 expected_status=204)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.post('/auth/tokens?nocatalog', body=auth_data)
+        self.assertValidProjectScopedTokenResponse(
+            r,
+            require_catalog=False,
+            endpoint_filter=True,
+            ep_filter_assoc=1)
+        self.assertEqual(r.result['token']['project']['id'],
+                         self.project['id'])
+
+    def test_project_scoped_token_with_no_endpoint_project_association(self):
+        """Verify endpoint filter when no endpoint-project association.
+
+        Test that the project scoped token response is valid when there are
+        no endpoint-project associations defined.
+
+        """
+        # create a project to work with
+        ref = self.new_project_ref(domain_id=self.domain_id)
+        r = self.post('/projects', body={'project': ref})
+        project = self.assertValidProjectResponse(r, ref)
+
+        # grant the user a role on the project
+        self.put(
+            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
+                'user_id': self.user['id'],
+                'project_id': project['id'],
+                'role_id': self.role['id']})
+
+        # set the user's preferred project
+        body = {'user': {'default_project_id': project['id']}}
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': self.user['id']},
+            body=body)
+        self.assertValidUserResponse(r)
+
+        # attempt to authenticate without requesting a project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.post('/auth/tokens?nocatalog', body=auth_data)
+        self.assertValidProjectScopedTokenResponse(
+            r,
+            require_catalog=False,
+            endpoint_filter=True)
+        self.assertEqual(r.result['token']['project']['id'], project['id'])
+
+    def test_default_scoped_token_with_no_endpoint_project_association(self):
+        """Verify endpoint filter when no endpoint-project association.
+
+        Test that the default project scoped token response is valid when
+        there are no endpoint-project associations defined.
+
+        """
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.post('/auth/tokens?nocatalog', body=auth_data)
+        self.assertValidProjectScopedTokenResponse(
+            r,
+            require_catalog=False,
+            endpoint_filter=True)
+        self.assertEqual(r.result['token']['project']['id'],
+                         self.project['id'])
+
+    def test_invalid_endpoint_project_association(self):
+        """Verify an invalid endpoint-project association is handled."""
+        # add first endpoint to default project
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': self.endpoint_id},
+                 body='',
+                 expected_status=204)
+
+        # create a second temporary endpoint
+        self.endpoint_id2 = uuid.uuid4().hex
+        self.endpoint2 = self.new_endpoint_ref(service_id=self.service_id)
+        self.endpoint2['id'] = self.endpoint_id2
+        self.catalog_api.create_endpoint(
+            self.endpoint_id2,
+            self.endpoint2.copy())
+
+        # add second endpoint to default project
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': self.endpoint_id2},
+                 body='',
+                 expected_status=204)
+
+        # remove the temporary reference; this leaves a dangling entry in
+        # the endpoint filter table, which is cleaned up when the catalog
+        # is built for a token request
+        self.catalog_api.delete_endpoint(self.endpoint_id2)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.post('/auth/tokens', body=auth_data)
+        self.assertValidProjectScopedTokenResponse(
+            r,
+            require_catalog=True,
+            endpoint_filter=True,
+            ep_filter_assoc=1)
+        self.assertEqual(r.result['token']['project']['id'],
+                         self.project['id'])
+
+    def test_disabled_endpoint(self):
+        """Test that a disabled endpoint is handled."""
+        # Add an enabled endpoint to the default project
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': self.endpoint_id},
+                 expected_status=204)
+
+        # Add a disabled endpoint to the default project.
+
+        # Create a disabled endpoint that's like the enabled one.
+        disabled_endpoint_ref = copy.copy(self.endpoint)
+        disabled_endpoint_id = uuid.uuid4().hex
+        disabled_endpoint_ref.update({
+            'id': disabled_endpoint_id,
+            'enabled': False,
+            'interface': 'internal'
+        })
+        self.catalog_api.create_endpoint(disabled_endpoint_id,
+                                         disabled_endpoint_ref)
+
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': disabled_endpoint_id},
+                 expected_status=204)
+
+        # Authenticate to get token with catalog
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.post('/auth/tokens', body=auth_data)
+
+        endpoints = r.result['token']['catalog'][0]['endpoints']
+        endpoint_ids = [ep['id'] for ep in endpoints]
+        self.assertEqual([self.endpoint_id], endpoint_ids)
+
+    def test_multiple_endpoint_project_associations(self):
+
+        def _create_an_endpoint():
+            endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
+            r = self.post('/endpoints', body={'endpoint': endpoint_ref})
+            return r.result['endpoint']['id']
+
+        # create three endpoints
+        endpoint_id1 = _create_an_endpoint()
+        endpoint_id2 = _create_an_endpoint()
+        _create_an_endpoint()
+
+        # only associate two endpoints with project
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': endpoint_id1},
+                 expected_status=204)
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': endpoint_id2},
+                 expected_status=204)
+
+        # there should be only two endpoints in token catalog
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.post('/auth/tokens', body=auth_data)
+        self.assertValidProjectScopedTokenResponse(
+            r,
+            require_catalog=True,
+            endpoint_filter=True,
+            ep_filter_assoc=2)
+
+
+class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
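+    # Expected JSON Home document entries: each relation maps either to a
+    # fixed 'href' or to an 'href-template' whose variables are described
+    # under 'href-vars', per the JSON Home draft specification.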
+    JSON_HOME_DATA = {
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
+        '1.0/rel/endpoint_projects': {
+            'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects',
+            'href-vars': {
+                'endpoint_id':
+                'http://docs.openstack.org/api/openstack-identity/3/param/'
+                'endpoint_id',
+            },
+        },
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
+        '1.0/rel/endpoint_groups': {
+            'href': '/OS-EP-FILTER/endpoint_groups',
+        },
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
+        '1.0/rel/endpoint_group': {
+            'href-template': '/OS-EP-FILTER/endpoint_groups/'
+            '{endpoint_group_id}',
+            'href-vars': {
+                'endpoint_group_id':
+                'http://docs.openstack.org/api/openstack-identity/3/'
+                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
+            },
+        },
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
+        '1.0/rel/endpoint_group_to_project_association': {
+            'href-template': '/OS-EP-FILTER/endpoint_groups/'
+            '{endpoint_group_id}/projects/{project_id}',
+            'href-vars': {
+                'project_id':
+                'http://docs.openstack.org/api/openstack-identity/3/param/'
+                'project_id',
+                'endpoint_group_id':
+                'http://docs.openstack.org/api/openstack-identity/3/'
+                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
+            },
+        },
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
+        '1.0/rel/projects_associated_with_endpoint_group': {
+            'href-template': '/OS-EP-FILTER/endpoint_groups/'
+            '{endpoint_group_id}/projects',
+            'href-vars': {
+                'endpoint_group_id':
+                'http://docs.openstack.org/api/openstack-identity/3/'
+                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
+            },
+        },
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
+        '1.0/rel/endpoints_in_endpoint_group': {
+            'href-template': '/OS-EP-FILTER/endpoint_groups/'
+            '{endpoint_group_id}/endpoints',
+            'href-vars': {
+                'endpoint_group_id':
+                'http://docs.openstack.org/api/openstack-identity/3/'
+                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
+            },
+        },
+    }
+
+
+class EndpointGroupCRUDTestCase(TestExtensionCase):
+
+    DEFAULT_ENDPOINT_GROUP_BODY = {
+        'endpoint_group': {
+            'description': 'endpoint group description',
+            'filters': {
+                'interface': 'admin'
+            },
+            'name': 'endpoint_group_name'
+        }
+    }
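+
+    # 'filters' keys must name endpoint attributes (e.g. interface,
+    # service_id, region_id); the invalid-group tests below rely on
+    # unknown keys such as 'foobar' being rejected with a 400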
+
+    DEFAULT_ENDPOINT_GROUP_URL = '/OS-EP-FILTER/endpoint_groups'
+
+    def test_create_endpoint_group(self):
+        """POST /OS-EP-FILTER/endpoint_groups
+
+        Valid endpoint group test case.
+
+        """
+        r = self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
+                      body=self.DEFAULT_ENDPOINT_GROUP_BODY)
+        expected_filters = (self.DEFAULT_ENDPOINT_GROUP_BODY
+                            ['endpoint_group']['filters'])
+        expected_name = (self.DEFAULT_ENDPOINT_GROUP_BODY
+                         ['endpoint_group']['name'])
+        self.assertEqual(expected_filters,
+                         r.result['endpoint_group']['filters'])
+        self.assertEqual(expected_name, r.result['endpoint_group']['name'])
+        self.assertThat(
+            r.result['endpoint_group']['links']['self'],
+            matchers.EndsWith(
+                '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+                    'endpoint_group_id': r.result['endpoint_group']['id']}))
+
+    def test_create_invalid_endpoint_group(self):
+        """POST /OS-EP-FILTER/endpoint_groups
+
+        Invalid endpoint group creation test case.
+
+        """
+        invalid_body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
+        invalid_body['endpoint_group']['filters'] = {'foobar': 'admin'}
+        self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
+                  body=invalid_body,
+                  expected_status=400)
+
+    def test_get_endpoint_group(self):
+        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
+
+        Valid endpoint group test case.
+
+        """
+        # create an endpoint group to work with
+        response = self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
+                             body=self.DEFAULT_ENDPOINT_GROUP_BODY)
+        endpoint_group_id = response.result['endpoint_group']['id']
+        endpoint_group_filters = response.result['endpoint_group']['filters']
+        endpoint_group_name = response.result['endpoint_group']['name']
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        # assert against the GET response rather than the earlier POST
+        r = self.get(url)
+        self.assertEqual(endpoint_group_id,
+                         r.result['endpoint_group']['id'])
+        self.assertEqual(endpoint_group_filters,
+                         r.result['endpoint_group']['filters'])
+        self.assertEqual(endpoint_group_name,
+                         r.result['endpoint_group']['name'])
+        self.assertThat(r.result['endpoint_group']['links']['self'],
+                        matchers.EndsWith(url))
+
+    def test_get_invalid_endpoint_group(self):
+        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
+
+        Invalid endpoint group test case.
+
+        """
+        endpoint_group_id = 'foobar'
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        self.get(url, expected_status=404)
+
+    def test_check_endpoint_group(self):
+        """HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+
+        Valid endpoint_group_id test case.
+
+        """
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        self.head(url, expected_status=200)
+
+    def test_check_invalid_endpoint_group(self):
+        """HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+
+        Invalid endpoint_group_id test case.
+
+        """
+        endpoint_group_id = 'foobar'
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        self.head(url, expected_status=404)
+
+    def test_patch_endpoint_group(self):
+        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
+
+        Valid endpoint group patch test case.
+
+        """
+        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
+        body['endpoint_group']['filters'] = {'region_id': 'UK'}
+        body['endpoint_group']['name'] = 'patch_test'
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        r = self.patch(url, body=body)
+        self.assertEqual(endpoint_group_id,
+                         r.result['endpoint_group']['id'])
+        self.assertEqual(body['endpoint_group']['filters'],
+                         r.result['endpoint_group']['filters'])
+        self.assertThat(r.result['endpoint_group']['links']['self'],
+                        matchers.EndsWith(url))
+
+    def test_patch_nonexistent_endpoint_group(self):
+        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
+
+        Invalid endpoint group patch test case.
+
+        """
+        body = {
+            'endpoint_group': {
+                'name': 'patch_test'
+            }
+        }
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': 'ABC'}
+        self.patch(url, body=body, expected_status=404)
+
+    def test_patch_invalid_endpoint_group(self):
+        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
+
+        Invalid endpoint group patch test case (unsupported filter key).
+
+        """
+        body = {
+            'endpoint_group': {
+                'description': 'endpoint group description',
+                'filters': {
+                    'region': 'UK'
+                },
+                'name': 'patch_test'
+            }
+        }
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        self.patch(url, body=body, expected_status=400)
+
+        # Perform a GET call to ensure that the content remains
+        # the same (as DEFAULT_ENDPOINT_GROUP_BODY) after attempting to update
+        # with an invalid filter
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        r = self.get(url)
+        del r.result['endpoint_group']['id']
+        del r.result['endpoint_group']['links']
+        self.assertDictEqual(self.DEFAULT_ENDPOINT_GROUP_BODY, r.result)
+
+    def test_delete_endpoint_group(self):
+        """DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group}
+
+        Valid endpoint group test case.
+
+        """
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        self.delete(url)
+        self.get(url, expected_status=404)
+
+    def test_delete_invalid_endpoint_group(self):
+        """DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group}
+
+        Invalid endpoint group test case.
+
+        """
+        endpoint_group_id = 'foobar'
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        self.delete(url, expected_status=404)
+
+    def test_add_endpoint_group_to_project(self):
+        """Create a valid endpoint group and project association."""
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+        self._create_endpoint_group_project_association(endpoint_group_id,
+                                                        self.project_id)
+
+    def test_add_endpoint_group_to_project_with_invalid_project_id(self):
+        """Create an invalid endpoint group and project association."""
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # associate endpoint group with project
+        project_id = uuid.uuid4().hex
+        url = self._get_project_endpoint_group_url(
+            endpoint_group_id, project_id)
+        self.put(url, expected_status=404)
+
+    def test_get_endpoint_group_in_project(self):
+        """Test retrieving project endpoint group association."""
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # associate endpoint group with project
+        url = self._get_project_endpoint_group_url(
+            endpoint_group_id, self.project_id)
+        self.put(url)
+        response = self.get(url)
+        self.assertEqual(
+            endpoint_group_id,
+            response.result['project_endpoint_group']['endpoint_group_id'])
+        self.assertEqual(
+            self.project_id,
+            response.result['project_endpoint_group']['project_id'])
+
+    def test_get_invalid_endpoint_group_in_project(self):
+        """Test retrieving a nonexistent endpoint group association."""
+        endpoint_group_id = uuid.uuid4().hex
+        project_id = uuid.uuid4().hex
+        url = self._get_project_endpoint_group_url(
+            endpoint_group_id, project_id)
+        self.get(url, expected_status=404)
+
+    def test_check_endpoint_group_to_project(self):
+        """Test HEAD with a valid endpoint group and project association."""
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+        self._create_endpoint_group_project_association(endpoint_group_id,
+                                                        self.project_id)
+        url = self._get_project_endpoint_group_url(
+            endpoint_group_id, self.project_id)
+        self.head(url, expected_status=200)
+
+    def test_check_endpoint_group_to_project_with_invalid_project_id(self):
+        """Test HEAD with an invalid endpoint group and project association."""
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # create an endpoint group to project association
+        url = self._get_project_endpoint_group_url(
+            endpoint_group_id, self.project_id)
+        self.put(url)
+
+        # send a head request with an invalid project id
+        project_id = uuid.uuid4().hex
+        url = self._get_project_endpoint_group_url(
+            endpoint_group_id, project_id)
+        self.head(url, expected_status=404)
+
+    def test_list_endpoint_groups(self):
+        """GET /OS-EP-FILTER/endpoint_groups."""
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # recover all endpoint groups
+        url = '/OS-EP-FILTER/endpoint_groups'
+        r = self.get(url)
+        self.assertNotEmpty(r.result['endpoint_groups'])
+        self.assertEqual(endpoint_group_id,
+                         r.result['endpoint_groups'][0].get('id'))
+
+    def test_list_projects_associated_with_endpoint_group(self):
+        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects
+
+        Valid endpoint group test case.
+
+        """
+        # create an endpoint group to work with
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # associate endpoint group with project
+        self._create_endpoint_group_project_association(endpoint_group_id,
+                                                        self.project_id)
+
+        # recover list of projects associated with endpoint group
+        url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
+               '/projects' %
+               {'endpoint_group_id': endpoint_group_id})
+        self.get(url)
+
+    def test_list_endpoints_associated_with_endpoint_group(self):
+        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints
+
+        Valid endpoint group test case.
+
+        """
+        # create a service
+        service_ref = self.new_service_ref()
+        response = self.post(
+            '/services',
+            body={'service': service_ref})
+
+        service_id = response.result['service']['id']
+
+        # create an endpoint
+        endpoint_ref = self.new_endpoint_ref(service_id=service_id)
+        response = self.post(
+            '/endpoints',
+            body={'endpoint': endpoint_ref})
+        endpoint_id = response.result['endpoint']['id']
+
+        # create an endpoint group
+        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
+        body['endpoint_group']['filters'] = {'service_id': service_id}
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, body)
+
+        # create association
+        self._create_endpoint_group_project_association(endpoint_group_id,
+                                                        self.project_id)
+
+        # recover list of endpoints associated with endpoint group
+        url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
+               '/endpoints' % {'endpoint_group_id': endpoint_group_id})
+        r = self.get(url)
+        self.assertNotEmpty(r.result['endpoints'])
+        self.assertEqual(endpoint_id, r.result['endpoints'][0].get('id'))
+
+    def test_list_endpoints_associated_with_project_endpoint_group(self):
+        """GET /OS-EP-FILTER/projects/{project_id}/endpoints
+
+        Valid project, endpoint id, and endpoint group test case.
+
+        """
+        # create a temporary service
+        service_ref = self.new_service_ref()
+        response = self.post('/services', body={'service': service_ref})
+        service_id2 = response.result['service']['id']
+
+        # create additional endpoints
+        self._create_endpoint_and_associations(
+            self.default_domain_project_id, service_id2)
+        self._create_endpoint_and_associations(
+            self.default_domain_project_id)
+
+        # create project and endpoint association with default endpoint:
+        self.put(self.default_request_url)
+
+        # create an endpoint group that contains a different endpoint
+        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
+        body['endpoint_group']['filters'] = {'service_id': service_id2}
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, body)
+
+        # associate endpoint group with project
+        self._create_endpoint_group_project_association(
+            endpoint_group_id, self.default_domain_project_id)
+
+        # Now get a list of the filtered endpoints
+        endpoints_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
+            'project_id': self.default_domain_project_id}
+        r = self.get(endpoints_url)
+        endpoints = self.assertValidEndpointListResponse(r)
+        self.assertEqual(2, len(endpoints))
+
+        # Now remove project endpoint group association
+        url = self._get_project_endpoint_group_url(
+            endpoint_group_id, self.default_domain_project_id)
+        self.delete(url)
+
+        # Now remove endpoint group
+        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
+            'endpoint_group_id': endpoint_group_id}
+        self.delete(url)
+
+        r = self.get(endpoints_url)
+        endpoints = self.assertValidEndpointListResponse(r)
+        self.assertEqual(1, len(endpoints))
+
+    def test_endpoint_group_project_cleanup_with_project(self):
+        # create endpoint group
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # create new project and associate with endpoint_group
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        r = self.post('/projects', body={'project': project_ref})
+        project = self.assertValidProjectResponse(r, project_ref)
+        url = self._get_project_endpoint_group_url(endpoint_group_id,
+                                                   project['id'])
+        self.put(url)
+
+        # check that we can recover the project endpoint group association
+        self.get(url)
+
+        # Now delete the project and then try and retrieve the project
+        # endpoint group association again
+        self.delete('/projects/%(project_id)s' % {
+            'project_id': project['id']})
+        self.get(url, expected_status=404)
+
+    def test_endpoint_group_project_cleanup_with_endpoint_group(self):
+        # create endpoint group
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # create new project and associate with endpoint_group
+        project_ref = self.new_project_ref(domain_id=self.domain_id)
+        r = self.post('/projects', body={'project': project_ref})
+        project = self.assertValidProjectResponse(r, project_ref)
+        url = self._get_project_endpoint_group_url(endpoint_group_id,
+                                                   project['id'])
+        self.put(url)
+
+        # check that we can recover the project endpoint group association
+        self.get(url)
+
+        # now remove the project endpoint group association
+        self.delete(url)
+        self.get(url, expected_status=404)
+
+    def test_removing_an_endpoint_group_project(self):
+        # create an endpoint group
+        endpoint_group_id = self._create_valid_endpoint_group(
+            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # create an endpoint_group project
+        url = self._get_project_endpoint_group_url(
+            endpoint_group_id, self.default_domain_project_id)
+        self.put(url)
+
+        # remove the endpoint group project
+        self.delete(url)
+        self.get(url, expected_status=404)
+
+    def _create_valid_endpoint_group(self, url, body):
+        r = self.post(url, body=body)
+        return r.result['endpoint_group']['id']
+
+    def _create_endpoint_group_project_association(self,
+                                                   endpoint_group_id,
+                                                   project_id):
+        url = self._get_project_endpoint_group_url(endpoint_group_id,
+                                                   project_id)
+        self.put(url)
+
+    def _get_project_endpoint_group_url(self,
+                                        endpoint_group_id,
+                                        project_id):
+        return ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
+                '/projects/%(project_id)s' %
+                {'endpoint_group_id': endpoint_group_id,
+                 'project_id': project_id})
+
+    def _create_endpoint_and_associations(self, project_id, service_id=None):
+        """Create an endpoint associated with a service and a project."""
+        if not service_id:
+            # create a new service
+            service_ref = self.new_service_ref()
+            response = self.post(
+                '/services', body={'service': service_ref})
+            service_id = response.result['service']['id']
+
+        # create endpoint
+        endpoint_ref = self.new_endpoint_ref(service_id=service_id)
+        response = self.post('/endpoints', body={'endpoint': endpoint_ref})
+        endpoint = response.result['endpoint']
+
+        # now add the endpoint to a project; note the association is made
+        # against self.project, not the project_id argument
+        self.put('/OS-EP-FILTER/projects/%(project_id)s'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'project_id': self.project['id'],
+                     'endpoint_id': endpoint['id']})
+        return endpoint
diff --git a/keystone-moon/keystone/tests/unit/test_auth.py b/keystone-moon/keystone/tests/unit/test_auth.py
new file mode 100644 (file)
index 0000000..295e028
--- /dev/null
@@ -0,0 +1,1328 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import datetime
+import uuid
+
+import mock
+from oslo_config import cfg
+from oslo_utils import timeutils
+from testtools import matchers
+
+from keystone import assignment
+from keystone import auth
+from keystone.common import authorization
+from keystone import config
+from keystone import exception
+from keystone.models import token_model
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+from keystone import token
+from keystone.token import provider
+from keystone import trust
+
+
+CONF = cfg.CONF
+TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
+
+HOST_URL = 'http://keystone:5001'
+
+
+def _build_user_auth(token=None, user_id=None, username=None,
+                     password=None, tenant_id=None, tenant_name=None,
+                     trust_id=None):
+    """Build an auth dictionary from whichever arguments are supplied."""
+    auth_json = {}
+    if token is not None:
+        auth_json['token'] = token
+    if username or password:
+        auth_json['passwordCredentials'] = {}
+    if username is not None:
+        auth_json['passwordCredentials']['username'] = username
+    if user_id is not None:
+        auth_json['passwordCredentials']['userId'] = user_id
+    if password is not None:
+        auth_json['passwordCredentials']['password'] = password
+    if tenant_name is not None:
+        auth_json['tenantName'] = tenant_name
+    if tenant_id is not None:
+        auth_json['tenantId'] = tenant_id
+    if trust_id is not None:
+        auth_json['trust_id'] = trust_id
+    return auth_json
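+
+
+# For example, _build_user_auth(username='FOO', password='foo2',
+# tenant_name='BAR') returns:
+#     {'passwordCredentials': {'username': 'FOO', 'password': 'foo2'},
+#      'tenantName': 'BAR'}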
+
+
+class AuthTest(tests.TestCase):
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(AuthTest, self).setUp()
+
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
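+        # request contexts simulating external authentication: in a real
+        # deployment REMOTE_USER and AUTH_TYPE would be set by the web
+        # server in front of keystone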
+        self.context_with_remote_user = {'environment':
+                                         {'REMOTE_USER': 'FOO',
+                                          'AUTH_TYPE': 'Negotiate'}}
+        self.empty_context = {'environment': {}}
+
+        self.controller = token.controllers.Auth()
+
+    def assertEqualTokens(self, a, b, enforce_audit_ids=True):
+        """Assert that two tokens are equal.
+
+        Compare two tokens while ignoring their ids and comparing their
+        'expires' and 'issued_at' timestamps only approximately.
+        """
+        def normalize(token):
+            token['access']['token']['id'] = 'dummy'
+            del token['access']['token']['expires']
+            del token['access']['token']['issued_at']
+            del token['access']['token']['audit_ids']
+            return token
+
+        self.assertCloseEnoughForGovernmentWork(
+            timeutils.parse_isotime(a['access']['token']['expires']),
+            timeutils.parse_isotime(b['access']['token']['expires']))
+        self.assertCloseEnoughForGovernmentWork(
+            timeutils.parse_isotime(a['access']['token']['issued_at']),
+            timeutils.parse_isotime(b['access']['token']['issued_at']))
+        if enforce_audit_ids:
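+            # a token records its own audit_id first; a token created from
+            # another token also carries the originating token's audit_id,
+            # so the list never exceeds two entries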
+            self.assertIn(a['access']['token']['audit_ids'][0],
+                          b['access']['token']['audit_ids'])
+            self.assertThat(len(a['access']['token']['audit_ids']),
+                            matchers.LessThan(3))
+            self.assertThat(len(b['access']['token']['audit_ids']),
+                            matchers.LessThan(3))
+
+        return self.assertDictEqual(normalize(a), normalize(b))
+
+
+class AuthBadRequests(AuthTest):
+    def test_no_external_auth(self):
+        """Verify _authenticate_external() raises when not applicable."""
+        self.assertRaises(
+            token.controllers.ExternalAuthNotApplicable,
+            self.controller._authenticate_external,
+            context={}, auth={})
+
+    def test_empty_remote_user(self):
+        """Verify _authenticate_external() raises if REMOTE_USER is empty."""
+        context = {'environment': {'REMOTE_USER': ''}}
+        self.assertRaises(
+            token.controllers.ExternalAuthNotApplicable,
+            self.controller._authenticate_external,
+            context=context, auth={})
+
+    def test_no_token_in_auth(self):
+        """Verify that _authenticate_token() raises exception if no token."""
+        self.assertRaises(
+            exception.ValidationError,
+            self.controller._authenticate_token,
+            None, {})
+
+    def test_no_credentials_in_auth(self):
+        """Verify that _authenticate_local() raises exception if no creds."""
+        self.assertRaises(
+            exception.ValidationError,
+            self.controller._authenticate_local,
+            None, {})
+
+    def test_empty_username_and_userid_in_auth(self):
+        """Verify that an empty username and userId raise ValidationError."""
+        self.assertRaises(
+            exception.ValidationError,
+            self.controller._authenticate_local,
+            None, {'passwordCredentials': {'password': 'abc',
+                                           'userId': '', 'username': ''}})
+
+    def test_authenticate_blank_request_body(self):
+        """Verify that sending an empty JSON dict raises ValidationError."""
+        self.assertRaises(exception.ValidationError,
+                          self.controller.authenticate,
+                          {}, {})
+
+    def test_authenticate_blank_auth(self):
+        """Verify sending blank 'auth' raises the right exception."""
+        body_dict = _build_user_auth()
+        self.assertRaises(exception.ValidationError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+    def test_authenticate_invalid_auth_content(self):
+        """Verify sending invalid 'auth' raises the right exception."""
+        self.assertRaises(exception.ValidationError,
+                          self.controller.authenticate,
+                          {}, {'auth': 'abcd'})
+
+    def test_authenticate_user_id_too_large(self):
+        """Verify sending large 'userId' raises the right exception."""
+        body_dict = _build_user_auth(user_id='0' * 65, username='FOO',
+                                     password='foo2')
+        self.assertRaises(exception.ValidationSizeError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+    def test_authenticate_username_too_large(self):
+        """Verify sending large 'username' raises the right exception."""
+        body_dict = _build_user_auth(username='0' * 65, password='foo2')
+        self.assertRaises(exception.ValidationSizeError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+    def test_authenticate_tenant_id_too_large(self):
+        """Verify sending large 'tenantId' raises the right exception."""
+        body_dict = _build_user_auth(username='FOO', password='foo2',
+                                     tenant_id='0' * 65)
+        self.assertRaises(exception.ValidationSizeError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+    def test_authenticate_tenant_name_too_large(self):
+        """Verify sending large 'tenantName' raises the right exception."""
+        body_dict = _build_user_auth(username='FOO', password='foo2',
+                                     tenant_name='0' * 65)
+        self.assertRaises(exception.ValidationSizeError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+    def test_authenticate_token_too_large(self):
+        """Verify sending large 'token' raises the right exception."""
+        body_dict = _build_user_auth(token={'id': '0' * 8193})
+        self.assertRaises(exception.ValidationSizeError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+    def test_authenticate_password_too_large(self):
+        """Verify sending large 'password' raises the right exception."""
+        length = CONF.identity.max_password_length + 1
+        body_dict = _build_user_auth(username='FOO', password='0' * length)
+        self.assertRaises(exception.ValidationSizeError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+
+class AuthWithToken(AuthTest):
+    def test_unscoped_token(self):
+        """Verify getting an unscoped token with password creds."""
+        body_dict = _build_user_auth(username='FOO',
+                                     password='foo2')
+        unscoped_token = self.controller.authenticate({}, body_dict)
+        self.assertNotIn('tenant', unscoped_token['access']['token'])
+
+    def test_auth_invalid_token(self):
+        """Verify exception is raised if invalid token."""
+        body_dict = _build_user_auth(token={"id": uuid.uuid4().hex})
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.authenticate,
+            {}, body_dict)
+
+    def test_auth_bad_formatted_token(self):
+        """Verify exception is raised if token is badly formatted."""
+        body_dict = _build_user_auth(token={})
+        self.assertRaises(
+            exception.ValidationError,
+            self.controller.authenticate,
+            {}, body_dict)
+
+    def test_auth_unscoped_token_no_project(self):
+        """Verify exchanging an unscoped token for another unscoped token."""
+        body_dict = _build_user_auth(
+            username='FOO',
+            password='foo2')
+        unscoped_token = self.controller.authenticate({}, body_dict)
+
+        body_dict = _build_user_auth(
+            token=unscoped_token["access"]["token"])
+        unscoped_token_2 = self.controller.authenticate({}, body_dict)
+
+        self.assertEqualTokens(unscoped_token, unscoped_token_2)
+
+    def test_auth_unscoped_token_project(self):
+        """Verify getting a token in a tenant with an unscoped token."""
+        # Add a role so we can check it comes back in the token
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'],
+            self.tenant_bar['id'],
+            self.role_member['id'])
+        # Get an unscoped token
+        body_dict = _build_user_auth(
+            username='FOO',
+            password='foo2')
+        unscoped_token = self.controller.authenticate({}, body_dict)
+        # Get a token on the BAR tenant using the unscoped token
+        body_dict = _build_user_auth(
+            token=unscoped_token["access"]["token"],
+            tenant_name="BAR")
+        scoped_token = self.controller.authenticate({}, body_dict)
+
+        tenant = scoped_token["access"]["token"]["tenant"]
+        roles = scoped_token["access"]["metadata"]["roles"]
+        self.assertEqual(self.tenant_bar['id'], tenant["id"])
+        self.assertThat(roles, matchers.Contains(self.role_member['id']))
+
+    def test_auth_token_project_group_role(self):
+        """Verify getting a token in a tenant with group roles."""
+        # Add a v2-style role so we can check it comes back in the token
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'],
+            self.tenant_bar['id'],
+            self.role_member['id'])
+        # Now create a group role for this user as well
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        new_group = {'domain_id': domain1['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        self.identity_api.add_user_to_group(self.user_foo['id'],
+                                            new_group['id'])
+        self.assignment_api.create_grant(
+            group_id=new_group['id'],
+            project_id=self.tenant_bar['id'],
+            role_id=self.role_admin['id'])
+
+        # Get a scoped token for the tenant
+        body_dict = _build_user_auth(
+            username='FOO',
+            password='foo2',
+            tenant_name="BAR")
+
+        scoped_token = self.controller.authenticate({}, body_dict)
+
+        tenant = scoped_token["access"]["token"]["tenant"]
+        roles = scoped_token["access"]["metadata"]["roles"]
+        self.assertEqual(self.tenant_bar['id'], tenant["id"])
+        self.assertIn(self.role_member['id'], roles)
+        self.assertIn(self.role_admin['id'], roles)
+
+    def test_belongs_to_no_tenant(self):
+        r = self.controller.authenticate(
+            {},
+            auth={
+                'passwordCredentials': {
+                    'username': self.user_foo['name'],
+                    'password': self.user_foo['password']
+                }
+            })
+        unscoped_token_id = r['access']['token']['id']
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.validate_token,
+            dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
+            token_id=unscoped_token_id)
+
+    def test_belongs_to(self):
+        body_dict = _build_user_auth(
+            username='FOO',
+            password='foo2',
+            tenant_name="BAR")
+
+        scoped_token = self.controller.authenticate({}, body_dict)
+        scoped_token_id = scoped_token['access']['token']['id']
+
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.validate_token,
+            dict(is_admin=True, query_string={'belongsTo': 'me'}),
+            token_id=scoped_token_id)
+
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.validate_token,
+            dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
+            token_id=scoped_token_id)
+
+    def test_token_auth_with_binding(self):
+        self.config_fixture.config(group='token', bind=['kerberos'])
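+        # with bind=['kerberos'], a token issued from a request that was
+        # externally authenticated via Negotiate records the REMOTE_USER
+        # principal under token['bind']['kerberos']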
+        body_dict = _build_user_auth()
+        unscoped_token = self.controller.authenticate(
+            self.context_with_remote_user, body_dict)
+
+        # the token should have bind information in it
+        bind = unscoped_token['access']['token']['bind']
+        self.assertEqual('FOO', bind['kerberos'])
+
+        body_dict = _build_user_auth(
+            token=unscoped_token['access']['token'],
+            tenant_name='BAR')
+
+        # using unscoped token without remote user context fails
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.authenticate,
+            self.empty_context, body_dict)
+
+        # using token with remote user context succeeds
+        scoped_token = self.controller.authenticate(
+            self.context_with_remote_user, body_dict)
+
+        # the bind information should be carried over from the original token
+        bind = scoped_token['access']['token']['bind']
+        self.assertEqual('FOO', bind['kerberos'])
+
+    def test_deleting_role_revokes_token(self):
+        role_controller = assignment.controllers.Role()
+        project1 = {'id': 'Project1', 'name': uuid.uuid4().hex,
+                    'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(project1['id'], project1)
+        role_one = {'id': 'role_one', 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role_one['id'], role_one)
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], project1['id'], role_one['id'])
+        no_context = {}
+
+        # Get a scoped token for the tenant
+        body_dict = _build_user_auth(
+            username=self.user_foo['name'],
+            password=self.user_foo['password'],
+            tenant_name=project1['name'])
+        token = self.controller.authenticate(no_context, body_dict)
+        # Ensure it is valid
+        token_id = token['access']['token']['id']
+        self.controller.validate_token(
+            dict(is_admin=True, query_string={}),
+            token_id=token_id)
+
+        # Delete the role, which should invalidate the token
+        role_controller.delete_role(
+            dict(is_admin=True, query_string={}), role_one['id'])
+
+        # Check the token is now invalid
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.controller.validate_token,
+            dict(is_admin=True, query_string={}),
+            token_id=token_id)
+
+    def test_only_original_audit_id_is_kept(self):
+        context = {}
+
+        def get_audit_ids(token):
+            return token['access']['token']['audit_ids']
+
+        # get a token
+        body_dict = _build_user_auth(username='FOO', password='foo2')
+        unscoped_token = self.controller.authenticate(context, body_dict)
+        starting_audit_id = get_audit_ids(unscoped_token)[0]
+        self.assertIsNotNone(starting_audit_id)
+
+        # get another token to ensure the correct parent audit_id is set
+        body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
+        unscoped_token_2 = self.controller.authenticate(context, body_dict)
+        audit_ids = get_audit_ids(unscoped_token_2)
+        self.assertThat(audit_ids, matchers.HasLength(2))
+        self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id))
+
+        # get another token from token 2 and ensure the correct parent
+        # audit_id is set
+        body_dict = _build_user_auth(token=unscoped_token_2["access"]["token"])
+        unscoped_token_3 = self.controller.authenticate(context, body_dict)
+        audit_ids = get_audit_ids(unscoped_token_3)
+        self.assertThat(audit_ids, matchers.HasLength(2))
+        self.assertThat(audit_ids[-1], matchers.Equals(starting_audit_id))
+
+    def test_revoke_by_audit_chain_id_original_token(self):
+        self.config_fixture.config(group='token', revoke_by_id=False)
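+        # with revoke_by_id disabled, revocation is driven by revocation
+        # events matched on audit_id / audit_chain_id rather than by
+        # stored token ids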
+        context = {}
+
+        # get a token
+        body_dict = _build_user_auth(username='FOO', password='foo2')
+        unscoped_token = self.controller.authenticate(context, body_dict)
+        token_id = unscoped_token['access']['token']['id']
+        # get a second token
+        body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
+        unscoped_token_2 = self.controller.authenticate(context, body_dict)
+        token_2_id = unscoped_token_2['access']['token']['id']
+
+        self.token_provider_api.revoke_token(token_id, revoke_chain=True)
+
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_v2_token,
+                          token_id=token_id)
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_v2_token,
+                          token_id=token_2_id)
+
+    def test_revoke_by_audit_chain_id_chained_token(self):
+        self.config_fixture.config(group='token', revoke_by_id=False)
+        context = {}
+
+        # get a token
+        body_dict = _build_user_auth(username='FOO', password='foo2')
+        unscoped_token = self.controller.authenticate(context, body_dict)
+        token_id = unscoped_token['access']['token']['id']
+        # get a second token
+        body_dict = _build_user_auth(token=unscoped_token["access"]["token"])
+        unscoped_token_2 = self.controller.authenticate(context, body_dict)
+        token_2_id = unscoped_token_2['access']['token']['id']
+
+        self.token_provider_api.revoke_token(token_2_id, revoke_chain=True)
+
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_v2_token,
+                          token_id=token_id)
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_v2_token,
+                          token_id=token_2_id)
+
+    def _mock_audit_info(self, parent_audit_id):
+        # NOTE(morganfainberg): The token model and other cases that are
+        # extracting the audit id expect 'None' if the audit id doesn't
+        # exist. This ensures that the audit_id is None and the
+        # audit_chain_id will also return None.
+        return [None, None]
+
+    def test_revoke_with_no_audit_info(self):
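+        # With audit info mocked to None, chain revocation cannot key on an
+        # audit chain id, yet both tokens should still end up revoked.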
+        self.config_fixture.config(group='token', revoke_by_id=False)
+        context = {}
+
+        with mock.patch.object(provider, 'audit_info', self._mock_audit_info):
+            # get a token
+            body_dict = _build_user_auth(username='FOO', password='foo2')
+            unscoped_token = self.controller.authenticate(context, body_dict)
+            token_id = unscoped_token['access']['token']['id']
+            # get a second token
+            body_dict = _build_user_auth(
+                token=unscoped_token['access']['token'])
+            unscoped_token_2 = self.controller.authenticate(context, body_dict)
+            token_2_id = unscoped_token_2['access']['token']['id']
+
+            self.token_provider_api.revoke_token(token_id, revoke_chain=True)
+
+            revoke_events = self.revoke_api.list_events()
+            self.assertThat(revoke_events, matchers.HasLength(1))
+            revoke_event = revoke_events[0].to_dict()
+            self.assertIn('expires_at', revoke_event)
+            self.assertEqual(unscoped_token_2['access']['token']['expires'],
+                             revoke_event['expires_at'])
+
+            self.assertRaises(exception.TokenNotFound,
+                              self.token_provider_api.validate_v2_token,
+                              token_id=token_id)
+            self.assertRaises(exception.TokenNotFound,
+                              self.token_provider_api.validate_v2_token,
+                              token_id=token_2_id)
+
+            # get a new token, with no audit info
+            body_dict = _build_user_auth(username='FOO', password='foo2')
+            unscoped_token = self.controller.authenticate(context, body_dict)
+            token_id = unscoped_token['access']['token']['id']
+            # get a second token
+            body_dict = _build_user_auth(
+                token=unscoped_token['access']['token'])
+            unscoped_token_2 = self.controller.authenticate(context, body_dict)
+            token_2_id = unscoped_token_2['access']['token']['id']
+
+            # Revoke by audit_id, no audit_info means both parent and child
+            # token are revoked.
+            self.token_provider_api.revoke_token(token_id)
+
+            revoke_events = self.revoke_api.list_events()
+            self.assertThat(revoke_events, matchers.HasLength(2))
+            revoke_event = revoke_events[1].to_dict()
+            self.assertIn('expires_at', revoke_event)
+            self.assertEqual(unscoped_token_2['access']['token']['expires'],
+                             revoke_event['expires_at'])
+
+            self.assertRaises(exception.TokenNotFound,
+                              self.token_provider_api.validate_v2_token,
+                              token_id=token_id)
+            self.assertRaises(exception.TokenNotFound,
+                              self.token_provider_api.validate_v2_token,
+                              token_id=token_2_id)
+
+
+class AuthWithPasswordCredentials(AuthTest):
+    def test_auth_invalid_user(self):
+        """Verify exception is raised if invalid user."""
+        body_dict = _build_user_auth(
+            username=uuid.uuid4().hex,
+            password=uuid.uuid4().hex)
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.authenticate,
+            {}, body_dict)
+
+    def test_auth_valid_user_invalid_password(self):
+        """Verify exception is raised if invalid password."""
+        body_dict = _build_user_auth(
+            username="FOO",
+            password=uuid.uuid4().hex)
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.authenticate,
+            {}, body_dict)
+
+    def test_auth_empty_password(self):
+        """Verify exception is raised if empty password."""
+        body_dict = _build_user_auth(
+            username="FOO",
+            password="")
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.authenticate,
+            {}, body_dict)
+
+    def test_auth_no_password(self):
+        """Verify exception is raised if empty password."""
+        body_dict = _build_user_auth(username="FOO")
+        self.assertRaises(
+            exception.ValidationError,
+            self.controller.authenticate,
+            {}, body_dict)
+
+    def test_authenticate_blank_password_credentials(self):
+        """Sending empty dict as passwordCredentials raises a 400 error."""
+        body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'}
+        self.assertRaises(exception.ValidationError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+    def test_authenticate_no_username(self):
+        """Verify skipping username raises the right exception."""
+        body_dict = _build_user_auth(password="pass",
+                                     tenant_name="demo")
+        self.assertRaises(exception.ValidationError,
+                          self.controller.authenticate,
+                          {}, body_dict)
+
+    def test_bind_without_remote_user(self):
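+        # kerberos bind is enabled, but the request context carries no
+        # REMOTE_USER, so no bind data should appear in the token.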
+        self.config_fixture.config(group='token', bind=['kerberos'])
+        body_dict = _build_user_auth(username='FOO', password='foo2',
+                                     tenant_name='BAR')
+        token = self.controller.authenticate({}, body_dict)
+        self.assertNotIn('bind', token['access']['token'])
+
+    def test_change_default_domain_id(self):
+        # If the default_domain_id config option is changed, the user in the
+        # auth data should be resolved from the new default domain.
+
+        # 1) Create a new domain.
+        new_domain_id = uuid.uuid4().hex
+        new_domain = {
+            'description': uuid.uuid4().hex,
+            'enabled': True,
+            'id': new_domain_id,
+            'name': uuid.uuid4().hex,
+        }
+
+        self.resource_api.create_domain(new_domain_id, new_domain)
+
+        # 2) Create user "foo" in new domain with different password than
+        #    default-domain foo.
+        new_user_password = uuid.uuid4().hex
+        new_user = {
+            'name': self.user_foo['name'],
+            'domain_id': new_domain_id,
+            'password': new_user_password,
+            'email': 'foo@bar2.com',
+        }
+
+        new_user = self.identity_api.create_user(new_user)
+
+        # 3) Update the default_domain_id config option to the new domain
+
+        self.config_fixture.config(group='identity',
+                                   default_domain_id=new_domain_id)
+
+        # 4) Authenticate as "foo" using the password in the new domain.
+
+        body_dict = _build_user_auth(
+            username=self.user_foo['name'],
+            password=new_user_password)
+
+        # The test is successful if this doesn't raise, so no need to assert.
+        self.controller.authenticate({}, body_dict)
+
+
+class AuthWithRemoteUser(AuthTest):
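+    """Tests for external (REMOTE_USER) authentication."""
+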
+    def test_unscoped_remote_authn(self):
+        """Verify getting an unscoped token with external authn."""
+        body_dict = _build_user_auth(
+            username='FOO',
+            password='foo2')
+        local_token = self.controller.authenticate(
+            {}, body_dict)
+
+        body_dict = _build_user_auth()
+        remote_token = self.controller.authenticate(
+            self.context_with_remote_user, body_dict)
+
+        self.assertEqualTokens(local_token, remote_token,
+                               enforce_audit_ids=False)
+
+    def test_unscoped_remote_authn_jsonless(self):
+        """Verify that external auth with invalid request fails."""
+        self.assertRaises(
+            exception.ValidationError,
+            self.controller.authenticate,
+            {'REMOTE_USER': 'FOO'},
+            None)
+
+    def test_scoped_remote_authn(self):
+        """Verify getting a token with external authn."""
+        body_dict = _build_user_auth(
+            username='FOO',
+            password='foo2',
+            tenant_name='BAR')
+        local_token = self.controller.authenticate(
+            {}, body_dict)
+
+        body_dict = _build_user_auth(
+            tenant_name='BAR')
+        remote_token = self.controller.authenticate(
+            self.context_with_remote_user, body_dict)
+
+        self.assertEqualTokens(local_token, remote_token,
+                               enforce_audit_ids=False)
+
+    def test_scoped_nometa_remote_authn(self):
+        """Verify getting a token with external authn and no metadata."""
+        body_dict = _build_user_auth(
+            username='TWO',
+            password='two2',
+            tenant_name='BAZ')
+        local_token = self.controller.authenticate(
+            {}, body_dict)
+
+        body_dict = _build_user_auth(tenant_name='BAZ')
+        remote_token = self.controller.authenticate(
+            {'environment': {'REMOTE_USER': 'TWO'}}, body_dict)
+
+        self.assertEqualTokens(local_token, remote_token,
+                               enforce_audit_ids=False)
+
+    def test_scoped_remote_authn_invalid_user(self):
+        """Verify that external auth with invalid user fails."""
+        body_dict = _build_user_auth(tenant_name="BAR")
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.authenticate,
+            {'environment': {'REMOTE_USER': uuid.uuid4().hex}},
+            body_dict)
+
+    def test_bind_with_kerberos(self):
+        self.config_fixture.config(group='token', bind=['kerberos'])
+        body_dict = _build_user_auth(tenant_name="BAR")
+        token = self.controller.authenticate(self.context_with_remote_user,
+                                             body_dict)
+        self.assertEqual('FOO', token['access']['token']['bind']['kerberos'])
+
+    def test_bind_without_config_opt(self):
+        self.config_fixture.config(group='token', bind=['x509'])
+        body_dict = _build_user_auth(tenant_name='BAR')
+        token = self.controller.authenticate(self.context_with_remote_user,
+                                             body_dict)
+        self.assertNotIn('bind', token['access']['token'])
+
+
+class AuthWithTrust(AuthTest):
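+    """Tests for getting tokens via OS-TRUST trusts."""
+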
+    def setUp(self):
+        super(AuthWithTrust, self).setUp()
+
+        self.trust_controller = trust.controllers.TrustV3()
+        self.auth_v3_controller = auth.controllers.Auth()
+        self.trustor = self.user_foo
+        self.trustee = self.user_two
+        self.assigned_roles = [self.role_member['id'],
+                               self.role_browser['id']]
+        for assigned_role in self.assigned_roles:
+            self.assignment_api.add_role_to_user_and_project(
+                self.trustor['id'], self.tenant_bar['id'], assigned_role)
+
+        self.sample_data = {'trustor_user_id': self.trustor['id'],
+                            'trustee_user_id': self.trustee['id'],
+                            'project_id': self.tenant_bar['id'],
+                            'impersonation': True,
+                            'roles': [{'id': self.role_browser['id']},
+                                      {'name': self.role_member['name']}]}
+
+    def config_overrides(self):
+        super(AuthWithTrust, self).config_overrides()
+        self.config_fixture.config(group='trust', enabled=True)
+
+    def _create_auth_context(self, token_id):
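+        # Expose the validated token the way auth middleware would: as an
+        # auth context stored in the request environment.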
+        token_ref = token_model.KeystoneToken(
+            token_id=token_id,
+            token_data=self.token_provider_api.validate_token(token_id))
+        auth_context = authorization.token_to_auth_context(token_ref)
+        return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context},
+                'token_id': token_id,
+                'host_url': HOST_URL}
+
+    def create_trust(self, trust_data, trustor_name, expires_at=None,
+                     impersonation=True):
+        username = trustor_name
+        password = 'foo2'
+        unscoped_token = self.get_unscoped_token(username, password)
+        context = self._create_auth_context(
+            unscoped_token['access']['token']['id'])
+        trust_data_copy = copy.deepcopy(trust_data)
+        trust_data_copy['expires_at'] = expires_at
+        trust_data_copy['impersonation'] = impersonation
+
+        return self.trust_controller.create_trust(
+            context, trust=trust_data_copy)['trust']
+
+    def get_unscoped_token(self, username, password='foo2'):
+        body_dict = _build_user_auth(username=username, password=password)
+        return self.controller.authenticate({}, body_dict)
+
+    def build_v2_token_request(self, username, password, trust,
+                               tenant_id=None):
+        if not tenant_id:
+            tenant_id = self.tenant_bar['id']
+        unscoped_token = self.get_unscoped_token(username, password)
+        unscoped_token_id = unscoped_token['access']['token']['id']
+        request_body = _build_user_auth(token={'id': unscoped_token_id},
+                                        trust_id=trust['id'],
+                                        tenant_id=tenant_id)
+        return request_body
+
+    def test_create_trust_bad_data_fails(self):
+        unscoped_token = self.get_unscoped_token(self.trustor['name'])
+        context = self._create_auth_context(
+            unscoped_token['access']['token']['id'])
+        bad_sample_data = {'trustor_user_id': self.trustor['id'],
+                           'project_id': self.tenant_bar['id'],
+                           'roles': [{'id': self.role_browser['id']}]}
+
+        self.assertRaises(exception.ValidationError,
+                          self.trust_controller.create_trust,
+                          context, trust=bad_sample_data)
+
+    def test_create_trust_no_roles(self):
+        unscoped_token = self.get_unscoped_token(self.trustor['name'])
+        context = {'token_id': unscoped_token['access']['token']['id']}
+        self.sample_data['roles'] = []
+        self.assertRaises(exception.Forbidden,
+                          self.trust_controller.create_trust,
+                          context, trust=self.sample_data)
+
+    def test_create_trust(self):
+        expires_at = timeutils.strtime(timeutils.utcnow() +
+                                       datetime.timedelta(minutes=10),
+                                       fmt=TIME_FORMAT)
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'],
+                                      expires_at=expires_at)
+        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
+        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
+        role_ids = [self.role_browser['id'], self.role_member['id']]
+        self.assertTrue(timeutils.parse_strtime(new_trust['expires_at'],
+                                                fmt=TIME_FORMAT))
+        self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
+                      new_trust['links']['self'])
+        self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
+                      new_trust['roles_links']['self'])
+
+        for role in new_trust['roles']:
+            self.assertIn(role['id'], role_ids)
+
+    def test_create_trust_expires_bad(self):
+        self.assertRaises(exception.ValidationTimeStampError,
+                          self.create_trust, self.sample_data,
+                          self.trustor['name'], expires_at="bad")
+        self.assertRaises(exception.ValidationTimeStampError,
+                          self.create_trust, self.sample_data,
+                          self.trustor['name'], expires_at="")
+        self.assertRaises(exception.ValidationTimeStampError,
+                          self.create_trust, self.sample_data,
+                          self.trustor['name'], expires_at="Z")
+
+    def test_create_trust_without_project_id(self):
+        """Verify that trust can be created without project id and
+        token can be generated with that trust.
+        """
+        unscoped_token = self.get_unscoped_token(self.trustor['name'])
+        context = self._create_auth_context(
+            unscoped_token['access']['token']['id'])
+        self.sample_data['project_id'] = None
+        self.sample_data['roles'] = []
+        new_trust = self.trust_controller.create_trust(
+            context, trust=self.sample_data)['trust']
+        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
+        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
+        self.assertIs(new_trust['impersonation'], True)
+        auth_response = self.fetch_v2_token_from_trust(new_trust)
+        token_user = auth_response['access']['user']
+        self.assertEqual(token_user['id'], new_trust['trustor_user_id'])
+
+    def test_get_trust(self):
+        unscoped_token = self.get_unscoped_token(self.trustor['name'])
+        context = {'token_id': unscoped_token['access']['token']['id'],
+                   'host_url': HOST_URL}
+        new_trust = self.trust_controller.create_trust(
+            context, trust=self.sample_data)['trust']
+        trust = self.trust_controller.get_trust(context,
+                                                new_trust['id'])['trust']
+        self.assertEqual(self.trustor['id'], trust['trustor_user_id'])
+        self.assertEqual(self.trustee['id'], trust['trustee_user_id'])
+        role_ids = [self.role_browser['id'], self.role_member['id']]
+        for role in new_trust['roles']:
+            self.assertIn(role['id'], role_ids)
+
+    def test_create_trust_no_impersonation(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'],
+                                      expires_at=None, impersonation=False)
+        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
+        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
+        self.assertIs(new_trust['impersonation'], False)
+        auth_response = self.fetch_v2_token_from_trust(new_trust)
+        token_user = auth_response['access']['user']
+        self.assertEqual(token_user['id'], new_trust['trustee_user_id'])
+
+        # TODO(ayoung): Endpoints
+
+    def test_create_trust_impersonation(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
+        self.assertEqual(self.trustee['id'], new_trust['trustee_user_id'])
+        self.assertIs(new_trust['impersonation'], True)
+        auth_response = self.fetch_v2_token_from_trust(new_trust)
+        token_user = auth_response['access']['user']
+        self.assertEqual(token_user['id'], new_trust['trustor_user_id'])
+
+    def test_token_from_trust_wrong_user_fails(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        request_body = self.build_v2_token_request('FOO', 'foo2', new_trust)
+        self.assertRaises(exception.Forbidden, self.controller.authenticate,
+                          {}, request_body)
+
+    def test_token_from_trust_wrong_project_fails(self):
+        for assigned_role in self.assigned_roles:
+            self.assignment_api.add_role_to_user_and_project(
+                self.trustor['id'], self.tenant_baz['id'], assigned_role)
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        request_body = self.build_v2_token_request('TWO', 'two2', new_trust,
+                                                   self.tenant_baz['id'])
+        self.assertRaises(exception.Forbidden, self.controller.authenticate,
+                          {}, request_body)
+
+    def fetch_v2_token_from_trust(self, trust):
+        request_body = self.build_v2_token_request('TWO', 'two2', trust)
+        auth_response = self.controller.authenticate({}, request_body)
+        return auth_response
+
+    def fetch_v3_token_from_trust(self, trust, trustee):
+        v3_password_data = {
+            'identity': {
+                "methods": ["password"],
+                "password": {
+                    "user": {
+                        "id": trustee["id"],
+                        "password": trustee["password"]
+                    }
+                }
+            },
+            'scope': {
+                'project': {
+                    'id': self.tenant_baz['id']
+                }
+            }
+        }
+        auth_response = self.auth_v3_controller.authenticate_for_token(
+            {'environment': {}, 'query_string': {}}, v3_password_data)
+        token = auth_response.headers['X-Subject-Token']
+
+        v3_req_with_trust = {
+            "identity": {
+                "methods": ["token"],
+                "token": {"id": token}},
+            "scope": {
+                "OS-TRUST:trust": {"id": trust['id']}}}
+        token_auth_response = self.auth_v3_controller.authenticate_for_token(
+            {'environment': {}, 'query_string': {}}, v3_req_with_trust)
+        return token_auth_response
+
+    def test_create_v3_token_from_trust(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee)
+
+        trust_token_user = auth_response.json['token']['user']
+        self.assertEqual(self.trustor['id'], trust_token_user['id'])
+
+        trust_token_trust = auth_response.json['token']['OS-TRUST:trust']
+        self.assertEqual(trust_token_trust['id'], new_trust['id'])
+        self.assertEqual(self.trustor['id'],
+                         trust_token_trust['trustor_user']['id'])
+        self.assertEqual(self.trustee['id'],
+                         trust_token_trust['trustee_user']['id'])
+
+        trust_token_roles = auth_response.json['token']['roles']
+        self.assertEqual(2, len(trust_token_roles))
+
+    def test_v3_trust_token_get_token_fails(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        auth_response = self.fetch_v3_token_from_trust(new_trust, self.trustee)
+        trust_token = auth_response.headers['X-Subject-Token']
+        v3_token_data = {'identity': {
+            'methods': ['token'],
+            'token': {'id': trust_token}
+        }}
+        self.assertRaises(
+            exception.Forbidden,
+            self.auth_v3_controller.authenticate_for_token,
+            {'environment': {},
+             'query_string': {}}, v3_token_data)
+
+    def test_token_from_trust(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        auth_response = self.fetch_v2_token_from_trust(new_trust)
+
+        self.assertIsNotNone(auth_response)
+        self.assertEqual(2,
+                         len(auth_response['access']['metadata']['roles']),
+                         "user_foo has three roles, but the token should"
+                         " only get the two roles specified in the trust.")
+
+    def assert_token_count_for_trust(self, trust, expected_value):
+        tokens = self.token_provider_api._persistence._list_tokens(
+            self.trustee['id'], trust_id=trust['id'])
+        token_count = len(tokens)
+        self.assertEqual(expected_value, token_count)
+
+    def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        self.assert_token_count_for_trust(new_trust, 0)
+        self.fetch_v2_token_from_trust(new_trust)
+        self.assert_token_count_for_trust(new_trust, 1)
+        self.token_provider_api._persistence.delete_tokens_for_user(
+            self.trustee['id'])
+        self.assert_token_count_for_trust(new_trust, 0)
+
+    def test_token_from_trust_cant_get_another_token(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        auth_response = self.fetch_v2_token_from_trust(new_trust)
+        trust_token_id = auth_response['access']['token']['id']
+        request_body = _build_user_auth(token={'id': trust_token_id},
+                                        tenant_id=self.tenant_bar['id'])
+        self.assertRaises(
+            exception.Forbidden,
+            self.controller.authenticate, {}, request_body)
+
+    def test_delete_trust_revokes_token(self):
+        unscoped_token = self.get_unscoped_token(self.trustor['name'])
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        context = self._create_auth_context(
+            unscoped_token['access']['token']['id'])
+        self.fetch_v2_token_from_trust(new_trust)
+        trust_id = new_trust['id']
+        tokens = self.token_provider_api._persistence._list_tokens(
+            self.trustor['id'],
+            trust_id=trust_id)
+        self.assertEqual(1, len(tokens))
+        self.trust_controller.delete_trust(context, trust_id=trust_id)
+        tokens = self.token_provider_api._persistence._list_tokens(
+            self.trustor['id'],
+            trust_id=trust_id)
+        self.assertEqual(0, len(tokens))
+
+    def test_token_from_trust_with_no_role_fails(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        for assigned_role in self.assigned_roles:
+            self.assignment_api.remove_role_from_user_and_project(
+                self.trustor['id'], self.tenant_bar['id'], assigned_role)
+        request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
+        self.assertRaises(
+            exception.Forbidden,
+            self.controller.authenticate, {}, request_body)
+
+    def test_expired_trust_get_token_fails(self):
+        expiry = "1999-02-18T10:10:00Z"
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'],
+                                      expiry)
+        request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
+        self.assertRaises(
+            exception.Forbidden,
+            self.controller.authenticate, {}, request_body)
+
+    def test_token_from_trust_with_wrong_role_fails(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        self.assignment_api.add_role_to_user_and_project(
+            self.trustor['id'],
+            self.tenant_bar['id'],
+            self.role_other['id'])
+        for assigned_role in self.assigned_roles:
+            self.assignment_api.remove_role_from_user_and_project(
+                self.trustor['id'], self.tenant_bar['id'], assigned_role)
+
+        request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
+
+        self.assertRaises(
+            exception.Forbidden,
+            self.controller.authenticate, {}, request_body)
+
+    def test_do_not_consume_remaining_uses_when_get_token_fails(self):
+        trust_data = copy.deepcopy(self.sample_data)
+        trust_data['remaining_uses'] = 3
+        new_trust = self.create_trust(trust_data, self.trustor['name'])
+
+        for assigned_role in self.assigned_roles:
+            self.assignment_api.remove_role_from_user_and_project(
+                self.trustor['id'], self.tenant_bar['id'], assigned_role)
+
+        request_body = self.build_v2_token_request('TWO', 'two2', new_trust)
+        self.assertRaises(exception.Forbidden,
+                          self.controller.authenticate, {}, request_body)
+
+        unscoped_token = self.get_unscoped_token(self.trustor['name'])
+        context = self._create_auth_context(
+            unscoped_token['access']['token']['id'])
+        trust = self.trust_controller.get_trust(context,
+                                                new_trust['id'])['trust']
+        self.assertEqual(3, trust['remaining_uses'])
+
+    def test_v2_trust_token_contains_trustor_user_id_and_impersonation(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        auth_response = self.fetch_v2_token_from_trust(new_trust)
+
+        self.assertEqual(new_trust['trustee_user_id'],
+                         auth_response['access']['trust']['trustee_user_id'])
+        self.assertEqual(new_trust['trustor_user_id'],
+                         auth_response['access']['trust']['trustor_user_id'])
+        self.assertEqual(new_trust['impersonation'],
+                         auth_response['access']['trust']['impersonation'])
+        self.assertEqual(new_trust['id'],
+                         auth_response['access']['trust']['id'])
+
+        validate_response = self.controller.validate_token(
+            context=dict(is_admin=True, query_string={}),
+            token_id=auth_response['access']['token']['id'])
+        self.assertEqual(
+            new_trust['trustee_user_id'],
+            validate_response['access']['trust']['trustee_user_id'])
+        self.assertEqual(
+            new_trust['trustor_user_id'],
+            validate_response['access']['trust']['trustor_user_id'])
+        self.assertEqual(
+            new_trust['impersonation'],
+            validate_response['access']['trust']['impersonation'])
+        self.assertEqual(
+            new_trust['id'],
+            validate_response['access']['trust']['id'])
+
+    def disable_user(self, user):
+        user['enabled'] = False
+        self.identity_api.update_user(user['id'], user)
+
+    def test_trust_get_token_fails_if_trustor_disabled(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        request_body = self.build_v2_token_request(self.trustee['name'],
+                                                   self.trustee['password'],
+                                                   new_trust)
+        self.disable_user(self.trustor)
+        self.assertRaises(
+            exception.Forbidden,
+            self.controller.authenticate, {}, request_body)
+
+    def test_trust_get_token_fails_if_trustee_disabled(self):
+        new_trust = self.create_trust(self.sample_data, self.trustor['name'])
+        request_body = self.build_v2_token_request(self.trustee['name'],
+                                                   self.trustee['password'],
+                                                   new_trust)
+        self.disable_user(self.trustee)
+        self.assertRaises(
+            exception.Unauthorized,
+            self.controller.authenticate, {}, request_body)
+
+
+class TokenExpirationTest(AuthTest):
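+    """Tests that token expiry is preserved across re-auth and validation."""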
+
+    @mock.patch.object(timeutils, 'utcnow')
+    def _maintain_token_expiration(self, mock_utcnow):
+        """Token expiration should be maintained after re-auth & validation."""
+        now = datetime.datetime.utcnow()
+        mock_utcnow.return_value = now
+
+        r = self.controller.authenticate(
+            {},
+            auth={
+                'passwordCredentials': {
+                    'username': self.user_foo['name'],
+                    'password': self.user_foo['password']
+                }
+            })
+        unscoped_token_id = r['access']['token']['id']
+        original_expiration = r['access']['token']['expires']
+
+        mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
+
+        r = self.controller.validate_token(
+            dict(is_admin=True, query_string={}),
+            token_id=unscoped_token_id)
+        self.assertEqual(original_expiration, r['access']['token']['expires'])
+
+        mock_utcnow.return_value = now + datetime.timedelta(seconds=2)
+
+        r = self.controller.authenticate(
+            {},
+            auth={
+                'token': {
+                    'id': unscoped_token_id,
+                },
+                'tenantId': self.tenant_bar['id'],
+            })
+        scoped_token_id = r['access']['token']['id']
+        self.assertEqual(original_expiration, r['access']['token']['expires'])
+
+        mock_utcnow.return_value = now + datetime.timedelta(seconds=3)
+
+        r = self.controller.validate_token(
+            dict(is_admin=True, query_string={}),
+            token_id=scoped_token_id)
+        self.assertEqual(original_expiration, r['access']['token']['expires'])
+
+    def test_maintain_uuid_token_expiration(self):
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.uuid.Provider')
+        self._maintain_token_expiration()
+
+
+class AuthCatalog(tests.SQLDriverOverrides, AuthTest):
+    """Tests for the catalog provided in the auth response."""
+
+    def config_files(self):
+        config_files = super(AuthCatalog, self).config_files()
+        # We need to use a backend that supports disabled endpoints, like the
+        # SQL backend.
+        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+        return config_files
+
+    def _create_endpoints(self):
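+        # Create one enabled endpoint, one disabled endpoint, and one
+        # endpoint on a disabled service; only the first should be returned
+        # in the catalog.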
+        def create_region(**kwargs):
+            ref = {'id': uuid.uuid4().hex}
+            ref.update(kwargs)
+            self.catalog_api.create_region(ref)
+            return ref
+
+        def create_endpoint(service_id, region, **kwargs):
+            id_ = uuid.uuid4().hex
+            ref = {
+                'id': id_,
+                'interface': 'public',
+                'region_id': region,
+                'service_id': service_id,
+                'url': 'http://localhost/%s' % uuid.uuid4().hex,
+            }
+            ref.update(kwargs)
+            self.catalog_api.create_endpoint(id_, ref)
+            return ref
+
+        # Create a service for use with the endpoints.
+        def create_service(**kwargs):
+            id_ = uuid.uuid4().hex
+            ref = {
+                'id': id_,
+                'name': uuid.uuid4().hex,
+                'type': uuid.uuid4().hex,
+            }
+            ref.update(kwargs)
+            self.catalog_api.create_service(id_, ref)
+            return ref
+
+        enabled_service_ref = create_service(enabled=True)
+        disabled_service_ref = create_service(enabled=False)
+
+        region = create_region()
+
+        # Create endpoints
+        enabled_endpoint_ref = create_endpoint(
+            enabled_service_ref['id'], region['id'])
+        create_endpoint(
+            enabled_service_ref['id'], region['id'], enabled=False,
+            interface='internal')
+        create_endpoint(
+            disabled_service_ref['id'], region['id'])
+
+        return enabled_endpoint_ref
+
+    def test_auth_catalog_disabled_endpoint(self):
+        """On authenticate, get a catalog that excludes disabled endpoints."""
+        endpoint_ref = self._create_endpoints()
+
+        # Authenticate
+        body_dict = _build_user_auth(
+            username='FOO',
+            password='foo2',
+            tenant_name="BAR")
+
+        token = self.controller.authenticate({}, body_dict)
+
+        # Check the catalog
+        self.assertEqual(1, len(token['access']['serviceCatalog']))
+        endpoint = token['access']['serviceCatalog'][0]['endpoints'][0]
+        self.assertEqual(
+            1, len(token['access']['serviceCatalog'][0]['endpoints']))
+
+        exp_endpoint = {
+            'id': endpoint_ref['id'],
+            'publicURL': endpoint_ref['url'],
+            'region': endpoint_ref['region_id'],
+        }
+
+        self.assertEqual(exp_endpoint, endpoint)
+
+    def test_validate_catalog_disabled_endpoint(self):
+        """On validate, get back a catalog that excludes disabled endpoints."""
+        endpoint_ref = self._create_endpoints()
+
+        # Authenticate
+        body_dict = _build_user_auth(
+            username='FOO',
+            password='foo2',
+            tenant_name="BAR")
+
+        token = self.controller.authenticate({}, body_dict)
+
+        # Validate
+        token_id = token['access']['token']['id']
+        validate_ref = self.controller.validate_token(
+            dict(is_admin=True, query_string={}),
+            token_id=token_id)
+
+        # Check the catalog returned by validate, not the one from auth
+        self.assertEqual(1, len(validate_ref['access']['serviceCatalog']))
+        endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0]
+        self.assertEqual(
+            1, len(validate_ref['access']['serviceCatalog'][0]['endpoints']))
+
+        exp_endpoint = {
+            'id': endpoint_ref['id'],
+            'publicURL': endpoint_ref['url'],
+            'region': endpoint_ref['region_id'],
+        }
+
+        self.assertEqual(exp_endpoint, endpoint)
+
+
+class NonDefaultAuthTest(tests.TestCase):
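+    """Tests for loading auth methods beyond the defaults."""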
+
+    def test_add_non_default_auth_method(self):
+        self.config_fixture.config(group='auth',
+                                   methods=['password', 'token', 'custom'])
+        config.setup_authentication()
+        self.assertTrue(hasattr(CONF.auth, 'custom'))
diff --git a/keystone-moon/keystone/tests/unit/test_auth_plugin.py b/keystone-moon/keystone/tests/unit/test_auth_plugin.py
new file mode 100644 (file)
index 0000000..11df95a
--- /dev/null
@@ -0,0 +1,220 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+
+from keystone import auth
+from keystone import exception
+from keystone.tests import unit as tests
+
+
+# for testing purposes only
+METHOD_NAME = 'simple_challenge_response'
+EXPECTED_RESPONSE = uuid.uuid4().hex
+DEMO_USER_ID = uuid.uuid4().hex
+
+
+class SimpleChallengeResponse(auth.AuthMethodHandler):
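+    """Two-step challenge-response auth method used only in these tests."""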
+
+    method = METHOD_NAME
+
+    def authenticate(self, context, auth_payload, user_context):
+        if 'response' in auth_payload:
+            if auth_payload['response'] != EXPECTED_RESPONSE:
+                raise exception.Unauthorized('Wrong answer')
+            user_context['user_id'] = DEMO_USER_ID
+        else:
+            return {"challenge": "What's the name of your high school?"}
+
+
+class DuplicateAuthPlugin(SimpleChallengeResponse):
+    """Duplicate simple challenge response auth plugin."""
+
+
+class MismatchedAuthPlugin(SimpleChallengeResponse):
+    method = uuid.uuid4().hex
+
+
+class NoMethodAuthPlugin(auth.AuthMethodHandler):
+    """An auth plugin that does not supply a method attribute."""
+    def authenticate(self, context, auth_payload, auth_context):
+        pass
+
+
+class TestAuthPlugin(tests.SQLDriverOverrides, tests.TestCase):
+    def setUp(self):
+        super(TestAuthPlugin, self).setUp()
+        self.load_backends()
+
+        self.api = auth.controllers.Auth()
+
+    def config_overrides(self):
+        super(TestAuthPlugin, self).config_overrides()
+        method_opts = {
+            'external': 'keystone.auth.plugins.external.DefaultDomain',
+            'password': 'keystone.auth.plugins.password.Password',
+            'token': 'keystone.auth.plugins.token.Token',
+            METHOD_NAME:
+                'keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse',
+        }
+
+        self.auth_plugin_config_override(
+            methods=['external', 'password', 'token', METHOD_NAME],
+            **method_opts)
+
+    def test_unsupported_auth_method(self):
+        method_name = uuid.uuid4().hex
+        auth_data = {'methods': [method_name]}
+        auth_data[method_name] = {'test': 'test'}
+        auth_data = {'identity': auth_data}
+        self.assertRaises(exception.AuthMethodNotSupported,
+                          auth.controllers.AuthInfo.create,
+                          None,
+                          auth_data)
+
+    def test_addition_auth_steps(self):
+        auth_data = {'methods': [METHOD_NAME]}
+        auth_data[METHOD_NAME] = {
+            'test': 'test'}
+        auth_data = {'identity': auth_data}
+        auth_info = auth.controllers.AuthInfo.create(None, auth_data)
+        auth_context = {'extras': {}, 'method_names': []}
+        try:
+            self.api.authenticate({'environment': {}}, auth_info, auth_context)
+        except exception.AdditionalAuthRequired as e:
+            self.assertIn('methods', e.authentication)
+            self.assertIn(METHOD_NAME, e.authentication['methods'])
+            self.assertIn(METHOD_NAME, e.authentication)
+            self.assertIn('challenge', e.authentication[METHOD_NAME])
+        else:
+            self.fail('AdditionalAuthRequired should have been raised')
+
+        # test correct response
+        auth_data = {'methods': [METHOD_NAME]}
+        auth_data[METHOD_NAME] = {
+            'response': EXPECTED_RESPONSE}
+        auth_data = {'identity': auth_data}
+        auth_info = auth.controllers.AuthInfo.create(None, auth_data)
+        auth_context = {'extras': {}, 'method_names': []}
+        self.api.authenticate({'environment': {}}, auth_info, auth_context)
+        self.assertEqual(DEMO_USER_ID, auth_context['user_id'])
+
+        # test incorrect response
+        auth_data = {'methods': [METHOD_NAME]}
+        auth_data[METHOD_NAME] = {
+            'response': uuid.uuid4().hex}
+        auth_data = {'identity': auth_data}
+        auth_info = auth.controllers.AuthInfo.create(None, auth_data)
+        auth_context = {'extras': {}, 'method_names': []}
+        self.assertRaises(exception.Unauthorized,
+                          self.api.authenticate,
+                          {'environment': {}},
+                          auth_info,
+                          auth_context)
+
+
+class TestAuthPluginDynamicOptions(TestAuthPlugin):
+    def config_overrides(self):
+        super(TestAuthPluginDynamicOptions, self).config_overrides()
+        # Clear the override for the [auth] ``methods`` option so it is
+        # possible to load the options from the config file.
+        self.config_fixture.conf.clear_override('methods', group='auth')
+
+    def config_files(self):
+        config_files = super(TestAuthPluginDynamicOptions, self).config_files()
+        config_files.append(tests.dirs.tests_conf('test_auth_plugin.conf'))
+        return config_files
+
+
+class TestInvalidAuthMethodRegistration(tests.TestCase):
+    def test_duplicate_auth_method_registration(self):
+        self.config_fixture.config(
+            group='auth',
+            methods=[
+                'keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse',
+                'keystone.tests.unit.test_auth_plugin.DuplicateAuthPlugin'])
+        self.clear_auth_plugin_registry()
+        self.assertRaises(ValueError, auth.controllers.load_auth_methods)
+
+    def test_no_method_attribute_auth_method_by_class_name_registration(self):
+        self.config_fixture.config(
+            group='auth',
+            methods=['keystone.tests.unit.test_auth_plugin.NoMethodAuthPlugin']
+        )
+        self.clear_auth_plugin_registry()
+        self.assertRaises(ValueError, auth.controllers.load_auth_methods)
+
+
+class TestMapped(tests.TestCase):
+    def setUp(self):
+        super(TestMapped, self).setUp()
+        self.load_backends()
+
+        self.api = auth.controllers.Auth()
+
+    def config_files(self):
+        config_files = super(TestMapped, self).config_files()
+        config_files.append(tests.dirs.tests_conf('test_auth_plugin.conf'))
+        return config_files
+
+    def config_overrides(self):
+        # don't override configs so we can use test_auth_plugin.conf only
+        pass
+
+    def _test_mapped_invocation_with_method_name(self, method_name):
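+        # Stub out the mapped plugin and verify it is handed the federation
+        # payload for the given protocol name.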
+        with mock.patch.object(auth.plugins.mapped.Mapped,
+                               'authenticate',
+                               return_value=None) as authenticate:
+            context = {'environment': {}}
+            auth_data = {
+                'identity': {
+                    'methods': [method_name],
+                    method_name: {'protocol': method_name},
+                }
+            }
+            auth_info = auth.controllers.AuthInfo.create(context, auth_data)
+            auth_context = {'extras': {},
+                            'method_names': [],
+                            'user_id': uuid.uuid4().hex}
+            self.api.authenticate(context, auth_info, auth_context)
+            # make sure Mapped plugin got invoked with the correct payload
+            ((context, auth_payload, auth_context),
+             kwargs) = authenticate.call_args
+            self.assertEqual(method_name, auth_payload['protocol'])
+
+    def test_mapped_with_remote_user(self):
+        with mock.patch.object(auth.plugins.mapped.Mapped,
+                               'authenticate',
+                               return_value=None) as authenticate:
+            # the external plugin should fail and fall through to the
+            # mapped plugin
+            method_name = 'saml2'
+            auth_data = {'methods': [method_name]}
+            # put the method name in the payload so it's easier to correlate
+            # method name with payload
+            auth_data[method_name] = {'protocol': method_name}
+            auth_data = {'identity': auth_data}
+            auth_info = auth.controllers.AuthInfo.create(None, auth_data)
+            auth_context = {'extras': {},
+                            'method_names': [],
+                            'user_id': uuid.uuid4().hex}
+            environment = {'environment': {'REMOTE_USER': 'foo@idp.com'}}
+            self.api.authenticate(environment, auth_info, auth_context)
+            # make sure Mapped plugin got invoked with the correct payload
+            ((context, auth_payload, auth_context),
+             kwargs) = authenticate.call_args
+            self.assertEqual(method_name, auth_payload['protocol'])
+
+    def test_supporting_multiple_methods(self):
+        for method_name in ['saml2', 'openid', 'x509']:
+            self._test_mapped_invocation_with_method_name(method_name)
diff --git a/keystone-moon/keystone/tests/unit/test_backend.py b/keystone-moon/keystone/tests/unit/test_backend.py
new file mode 100644 (file)
index 0000000..6cf0649
--- /dev/null
@@ -0,0 +1,5741 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import datetime
+import hashlib
+import uuid
+
+from keystoneclient.common import cms
+import mock
+from oslo_config import cfg
+from oslo_utils import timeutils
+import six
+from testtools import matchers
+
+from keystone.catalog import core
+from keystone.common import driver_hints
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit import filtering
+from keystone.tests.unit import utils as test_utils
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
+NULL_OBJECT = object()
+
+
+class IdentityTests(object):
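+    """Identity and assignment tests shared across backend drivers."""
+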
+    def _get_domain_fixture(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        return domain
+
+    def _set_domain_scope(self, domain_id):
+        # We only provide a domain scope if we have multiple drivers
+        if CONF.identity.domain_specific_drivers_enabled:
+            return domain_id
+
+    def test_project_add_and_remove_user_role(self):
+        user_ids = self.assignment_api.list_user_ids_for_project(
+            self.tenant_bar['id'])
+        self.assertNotIn(self.user_two['id'], user_ids)
+
+        self.assignment_api.add_role_to_user_and_project(
+            tenant_id=self.tenant_bar['id'],
+            user_id=self.user_two['id'],
+            role_id=self.role_other['id'])
+        user_ids = self.assignment_api.list_user_ids_for_project(
+            self.tenant_bar['id'])
+        self.assertIn(self.user_two['id'], user_ids)
+
+        self.assignment_api.remove_role_from_user_and_project(
+            tenant_id=self.tenant_bar['id'],
+            user_id=self.user_two['id'],
+            role_id=self.role_other['id'])
+
+        user_ids = self.assignment_api.list_user_ids_for_project(
+            self.tenant_bar['id'])
+        self.assertNotIn(self.user_two['id'], user_ids)
+
+    def test_remove_user_role_not_assigned(self):
+        # Expect failure when attempting to remove a role that was never
+        # assigned to the user.
+        self.assertRaises(exception.RoleNotFound,
+                          self.assignment_api.
+                          remove_role_from_user_and_project,
+                          tenant_id=self.tenant_bar['id'],
+                          user_id=self.user_two['id'],
+                          role_id=self.role_other['id'])
+
+    def test_authenticate_bad_user(self):
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=uuid.uuid4().hex,
+                          password=self.user_foo['password'])
+
+    def test_authenticate_bad_password(self):
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=self.user_foo['id'],
+                          password=uuid.uuid4().hex)
+
+    def test_authenticate(self):
+        user_ref = self.identity_api.authenticate(
+            context={},
+            user_id=self.user_sna['id'],
+            password=self.user_sna['password'])
+        # NOTE(termie): the password field is left in user_sna to make
+        #               it easier to authenticate in tests, but should
+        #               not be returned by the api
+        self.user_sna.pop('password')
+        self.user_sna['enabled'] = True
+        self.assertDictEqual(user_ref, self.user_sna)
+
+    def test_authenticate_and_get_roles_no_metadata(self):
+        user = {
+            'name': 'NO_META',
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'password': 'no_meta2',
+        }
+        new_user = self.identity_api.create_user(user)
+        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+                                                new_user['id'])
+        user_ref = self.identity_api.authenticate(
+            context={},
+            user_id=new_user['id'],
+            password=user['password'])
+        self.assertNotIn('password', user_ref)
+        # NOTE(termie): the password field is left in the user dict to make
+        #               it easier to authenticate in tests, but should
+        #               not be returned by the api
+        user.pop('password')
+        self.assertDictContainsSubset(user, user_ref)
+        role_list = self.assignment_api.get_roles_for_user_and_project(
+            new_user['id'], self.tenant_baz['id'])
+        self.assertEqual(1, len(role_list))
+        self.assertIn(CONF.member_role_id, role_list)
+
+    def test_authenticate_if_no_password_set(self):
+        user = {
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID,
+        }
+        user_ref = self.identity_api.create_user(user)
+
+        # Authenticating as a user that has no password set should fail.
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=user_ref['id'],
+                          password='password')
+
+    def test_create_unicode_user_name(self):
+        unicode_name = u'name \u540d\u5b57'
+        user = {'name': unicode_name,
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': uuid.uuid4().hex}
+        ref = self.identity_api.create_user(user)
+        self.assertEqual(unicode_name, ref['name'])
+
+    def test_get_project(self):
+        tenant_ref = self.resource_api.get_project(self.tenant_bar['id'])
+        self.assertDictEqual(tenant_ref, self.tenant_bar)
+
+    def test_get_project_404(self):
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          uuid.uuid4().hex)
+
+    def test_get_project_by_name(self):
+        tenant_ref = self.resource_api.get_project_by_name(
+            self.tenant_bar['name'],
+            DEFAULT_DOMAIN_ID)
+        self.assertDictEqual(tenant_ref, self.tenant_bar)
+
+    def test_get_project_by_name_404(self):
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project_by_name,
+                          uuid.uuid4().hex,
+                          DEFAULT_DOMAIN_ID)
+
+    def test_list_user_ids_for_project(self):
+        user_ids = self.assignment_api.list_user_ids_for_project(
+            self.tenant_baz['id'])
+        self.assertEqual(2, len(user_ids))
+        self.assertIn(self.user_two['id'], user_ids)
+        self.assertIn(self.user_badguy['id'], user_ids)
+
+    def test_list_user_ids_for_project_no_duplicates(self):
+        # Create user
+        user_ref = {
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'password': uuid.uuid4().hex,
+            'enabled': True}
+        user_ref = self.identity_api.create_user(user_ref)
+        # Create project
+        project_ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(
+            project_ref['id'], project_ref)
+        # Create 2 roles and give user each role in project
+        for i in range(2):
+            role_ref = {
+                'id': uuid.uuid4().hex,
+                'name': uuid.uuid4().hex}
+            self.role_api.create_role(role_ref['id'], role_ref)
+            self.assignment_api.add_role_to_user_and_project(
+                user_id=user_ref['id'],
+                tenant_id=project_ref['id'],
+                role_id=role_ref['id'])
+        # Get the list of user_ids in project
+        user_ids = self.assignment_api.list_user_ids_for_project(
+            project_ref['id'])
+        # Ensure the user is only returned once
+        self.assertEqual(1, len(user_ids))
+
+    def test_get_project_user_ids_404(self):
+        self.assertRaises(exception.ProjectNotFound,
+                          self.assignment_api.list_user_ids_for_project,
+                          uuid.uuid4().hex)
+
+    def test_get_user(self):
+        user_ref = self.identity_api.get_user(self.user_foo['id'])
+        # NOTE(termie): the password field is left in user_foo to make
+        #               it easier to authenticate in tests, but should
+        #               not be returned by the api
+        self.user_foo.pop('password')
+        self.assertDictEqual(user_ref, self.user_foo)
+
+    @tests.skip_if_cache_disabled('identity')
+    def test_cache_layer_get_user(self):
+        user = {
+            'name': uuid.uuid4().hex.lower(),
+            'domain_id': DEFAULT_DOMAIN_ID
+        }
+        self.identity_api.create_user(user)
+        ref = self.identity_api.get_user_by_name(user['name'],
+                                                 user['domain_id'])
+        # cache the result.
+        self.identity_api.get_user(ref['id'])
+        # delete bypassing identity api
+        domain_id, driver, entity_id = (
+            self.identity_api._get_domain_driver_and_entity_id(ref['id']))
+        driver.delete_user(entity_id)
+
+        self.assertDictEqual(ref, self.identity_api.get_user(ref['id']))
+        self.identity_api.get_user.invalidate(self.identity_api, ref['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user, ref['id'])
+        user = {
+            'name': uuid.uuid4().hex.lower(),
+            'domain_id': DEFAULT_DOMAIN_ID
+        }
+        self.identity_api.create_user(user)
+        ref = self.identity_api.get_user_by_name(user['name'],
+                                                 user['domain_id'])
+        user['description'] = uuid.uuid4().hex
+        # cache the result.
+        self.identity_api.get_user(ref['id'])
+        # update using identity api and get back updated user.
+        user_updated = self.identity_api.update_user(ref['id'], user)
+        self.assertDictContainsSubset(self.identity_api.get_user(ref['id']),
+                                      user_updated)
+        self.assertDictContainsSubset(
+            self.identity_api.get_user_by_name(ref['name'], ref['domain_id']),
+            user_updated)
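+        # A minimal sketch (an assumption, not part of this change) of the
+        # memoization pattern these cache-layer tests exercise, in the
+        # style of a dogpile.cache region:
+        #
+        #     @region.cache_on_arguments()
+        #     def get_user(self, user_id):
+        #         return driver.get_user(user_id)
+        #
+        # A function decorated this way gains an invalidate() helper that
+        # takes the same arguments as the cached call, which is why the
+        # test above passes (self.identity_api, ref['id']) to
+        # get_user.invalidate().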
+
+    def test_get_user_404(self):
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          uuid.uuid4().hex)
+
+    def test_get_user_by_name(self):
+        user_ref = self.identity_api.get_user_by_name(
+            self.user_foo['name'], DEFAULT_DOMAIN_ID)
+        # NOTE(termie): the password field is left in user_foo to make
+        #               it easier to authenticate in tests, but should
+        #               not be returned by the api
+        self.user_foo.pop('password')
+        self.assertDictEqual(user_ref, self.user_foo)
+
+    @tests.skip_if_cache_disabled('identity')
+    def test_cache_layer_get_user_by_name(self):
+        user = {
+            'name': uuid.uuid4().hex.lower(),
+            'domain_id': DEFAULT_DOMAIN_ID
+        }
+        self.identity_api.create_user(user)
+        ref = self.identity_api.get_user_by_name(user['name'],
+                                                 user['domain_id'])
+        # delete bypassing the identity api.
+        domain_id, driver, entity_id = (
+            self.identity_api._get_domain_driver_and_entity_id(ref['id']))
+        driver.delete_user(entity_id)
+
+        self.assertDictEqual(ref, self.identity_api.get_user_by_name(
+            user['name'], DEFAULT_DOMAIN_ID))
+        self.identity_api.get_user_by_name.invalidate(
+            self.identity_api, user['name'], DEFAULT_DOMAIN_ID)
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user_by_name,
+                          user['name'], DEFAULT_DOMAIN_ID)
+        user = {
+            'name': uuid.uuid4().hex.lower(),
+            'domain_id': DEFAULT_DOMAIN_ID
+        }
+        self.identity_api.create_user(user)
+        ref = self.identity_api.get_user_by_name(user['name'],
+                                                 user['domain_id'])
+        user['description'] = uuid.uuid4().hex
+        user_updated = self.identity_api.update_user(ref['id'], user)
+        self.assertDictContainsSubset(self.identity_api.get_user(ref['id']),
+                                      user_updated)
+        self.assertDictContainsSubset(
+            self.identity_api.get_user_by_name(ref['name'], ref['domain_id']),
+            user_updated)
+
+    def test_get_user_by_name_404(self):
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user_by_name,
+                          uuid.uuid4().hex,
+                          DEFAULT_DOMAIN_ID)
+
+    def test_create_duplicate_user_name_fails(self):
+        user = {'name': 'fake1',
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': 'fakepass',
+                'tenants': ['bar']}
+        user = self.identity_api.create_user(user)
+        self.assertRaises(exception.Conflict,
+                          self.identity_api.create_user,
+                          user)
+
+    def test_create_duplicate_user_name_in_different_domains(self):
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        user1 = {'name': uuid.uuid4().hex,
+                 'domain_id': DEFAULT_DOMAIN_ID,
+                 'password': uuid.uuid4().hex}
+        user2 = {'name': user1['name'],
+                 'domain_id': new_domain['id'],
+                 'password': uuid.uuid4().hex}
+        self.identity_api.create_user(user1)
+        self.identity_api.create_user(user2)
+
+    def test_move_user_between_domains(self):
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': domain1['id'],
+                'password': uuid.uuid4().hex}
+        user = self.identity_api.create_user(user)
+        user['domain_id'] = domain2['id']
+        self.identity_api.update_user(user['id'], user)
+
+    def test_move_user_between_domains_with_clashing_names_fails(self):
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        # First, create a user in domain1
+        user1 = {'name': uuid.uuid4().hex,
+                 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex}
+        user1 = self.identity_api.create_user(user1)
+        # Now create a user in domain2 with a potentially clashing
+        # name - which should work since we have domain separation
+        user2 = {'name': user1['name'],
+                 'domain_id': domain2['id'],
+                 'password': uuid.uuid4().hex}
+        user2 = self.identity_api.create_user(user2)
+        # Now try to move user1 into the 2nd domain - which should
+        # fail since the names clash
+        user1['domain_id'] = domain2['id']
+        self.assertRaises(exception.Conflict,
+                          self.identity_api.update_user,
+                          user1['id'],
+                          user1)
+
+    def test_rename_duplicate_user_name_fails(self):
+        user1 = {'name': 'fake1',
+                 'domain_id': DEFAULT_DOMAIN_ID,
+                 'password': 'fakepass',
+                 'tenants': ['bar']}
+        user2 = {'name': 'fake2',
+                 'domain_id': DEFAULT_DOMAIN_ID,
+                 'password': 'fakepass',
+                 'tenants': ['bar']}
+        self.identity_api.create_user(user1)
+        user2 = self.identity_api.create_user(user2)
+        user2['name'] = 'fake1'
+        self.assertRaises(exception.Conflict,
+                          self.identity_api.update_user,
+                          user2['id'],
+                          user2)
+
+    def test_update_user_id_fails(self):
+        user = {'name': 'fake1',
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': 'fakepass',
+                'tenants': ['bar']}
+        user = self.identity_api.create_user(user)
+        original_id = user['id']
+        user['id'] = 'fake2'
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.update_user,
+                          original_id,
+                          user)
+        user_ref = self.identity_api.get_user(original_id)
+        self.assertEqual(original_id, user_ref['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          'fake2')
+
+    def test_create_duplicate_project_id_fails(self):
+        tenant = {'id': 'fake1', 'name': 'fake1',
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project('fake1', tenant)
+        tenant['name'] = 'fake2'
+        self.assertRaises(exception.Conflict,
+                          self.resource_api.create_project,
+                          'fake1',
+                          tenant)
+
+    def test_create_duplicate_project_name_fails(self):
+        tenant = {'id': 'fake1', 'name': 'fake',
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project('fake1', tenant)
+        tenant['id'] = 'fake2'
+        self.assertRaises(exception.Conflict,
+                          self.resource_api.create_project,
+                          'fake1',
+                          tenant)
+
+    def test_create_duplicate_project_name_in_different_domains(self):
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        tenant1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                   'domain_id': DEFAULT_DOMAIN_ID}
+        tenant2 = {'id': uuid.uuid4().hex, 'name': tenant1['name'],
+                   'domain_id': new_domain['id']}
+        self.resource_api.create_project(tenant1['id'], tenant1)
+        self.resource_api.create_project(tenant2['id'], tenant2)
+
+    def test_move_project_between_domains(self):
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        project = {'id': uuid.uuid4().hex,
+                   'name': uuid.uuid4().hex,
+                   'domain_id': domain1['id']}
+        self.resource_api.create_project(project['id'], project)
+        project['domain_id'] = domain2['id']
+        self.resource_api.update_project(project['id'], project)
+
+    def test_move_project_between_domains_with_clashing_names_fails(self):
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        # First, create a project in domain1
+        project1 = {'id': uuid.uuid4().hex,
+                    'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        # Now create a project in domain2 with a potentially clashing
+        # name - which should work since we have domain separation
+        project2 = {'id': uuid.uuid4().hex,
+                    'name': project1['name'],
+                    'domain_id': domain2['id']}
+        self.resource_api.create_project(project2['id'], project2)
+        # Now try to move project1 into the 2nd domain - which should
+        # fail since the names clash
+        project1['domain_id'] = domain2['id']
+        self.assertRaises(exception.Conflict,
+                          self.resource_api.update_project,
+                          project1['id'],
+                          project1)
+
+    def test_rename_duplicate_project_name_fails(self):
+        tenant1 = {'id': 'fake1', 'name': 'fake1',
+                   'domain_id': DEFAULT_DOMAIN_ID}
+        tenant2 = {'id': 'fake2', 'name': 'fake2',
+                   'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project('fake1', tenant1)
+        self.resource_api.create_project('fake2', tenant2)
+        tenant2['name'] = 'fake1'
+        self.assertRaises(exception.Error,
+                          self.resource_api.update_project,
+                          'fake2',
+                          tenant2)
+
+    def test_update_project_id_does_nothing(self):
+        tenant = {'id': 'fake1', 'name': 'fake1',
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project('fake1', tenant)
+        tenant['id'] = 'fake2'
+        self.resource_api.update_project('fake1', tenant)
+        tenant_ref = self.resource_api.get_project('fake1')
+        self.assertEqual('fake1', tenant_ref['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          'fake2')
+
+    def test_list_role_assignments_unfiltered(self):
+        """Test for unfiltered listing role assignments.
+
+        Test Plan:
+
+        - Create a domain, with a user, group & project
+        - Find how many role assignments already exist (from default
+          fixtures)
+        - Create a grant of each type (user/group on project/domain)
+        - Check the number of assignments has gone up by 4 and that
+          the entries we added are in the list returned
+        - Check that if we list assignments by role_id, then we get back
+          assignments that only contain that role.
+
+        """
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_project = {'id': uuid.uuid4().hex,
+                       'name': uuid.uuid4().hex,
+                       'domain_id': new_domain['id']}
+        self.resource_api.create_project(new_project['id'], new_project)
+
+        # First check how many role grants already exist
+        existing_assignments = len(self.assignment_api.list_role_assignments())
+        existing_assignments_for_role = len(
+            self.assignment_api.list_role_assignments_for_role(
+                role_id='admin'))
+
+        # Now create the grants (roles are defined in default_fixtures)
+        self.assignment_api.create_grant(user_id=new_user['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+        self.assignment_api.create_grant(user_id=new_user['id'],
+                                         project_id=new_project['id'],
+                                         role_id='other')
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='admin')
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         project_id=new_project['id'],
+                                         role_id='admin')
+
+        # Read back the full list of assignments - check it has gone up by 4
+        assignment_list = self.assignment_api.list_role_assignments()
+        self.assertEqual(existing_assignments + 4, len(assignment_list))
+
+        # Now check that each of our four new entries is in the list
+        self.assertIn(
+            {'user_id': new_user['id'], 'domain_id': new_domain['id'],
+             'role_id': 'member'},
+            assignment_list)
+        self.assertIn(
+            {'user_id': new_user['id'], 'project_id': new_project['id'],
+             'role_id': 'other'},
+            assignment_list)
+        self.assertIn(
+            {'group_id': new_group['id'], 'domain_id': new_domain['id'],
+             'role_id': 'admin'},
+            assignment_list)
+        self.assertIn(
+            {'group_id': new_group['id'], 'project_id': new_project['id'],
+             'role_id': 'admin'},
+            assignment_list)
+
+        # Read back the list of assignments for just the admin role, checking
+        # this only goes up by two.
+        assignment_list = self.assignment_api.list_role_assignments_for_role(
+            role_id='admin')
+        self.assertEqual(existing_assignments_for_role + 2,
+                         len(assignment_list))
+
+        # Now check that each of our two new entries is in the list
+        self.assertIn(
+            {'group_id': new_group['id'], 'domain_id': new_domain['id'],
+             'role_id': 'admin'},
+            assignment_list)
+        self.assertIn(
+            {'group_id': new_group['id'], 'project_id': new_project['id'],
+             'role_id': 'admin'},
+            assignment_list)
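+        # Each assignment returned by list_role_assignments() is a flat
+        # dict naming an actor, a target and a role; the shape is exactly
+        # what the assertions above check for, e.g.:
+        #
+        #     {'user_id': <user id>, 'domain_id': <domain id>,
+        #      'role_id': 'member'}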
+
+    def test_list_group_role_assignment(self):
+        # When a group role assignment is created and the role assignments are
+        # listed then the group role assignment is included in the list.
+
+        MEMBER_ROLE_ID = 'member'
+
+        def get_member_assignments():
+            assignments = self.assignment_api.list_role_assignments()
+            # Use a list comprehension rather than filter() so that a list
+            # is returned on both Python 2 and Python 3 (on Python 3,
+            # filter() returns an iterator, which would break the list
+            # concatenation below).
+            return [a for a in assignments
+                    if a['role_id'] == MEMBER_ROLE_ID]
+
+        orig_member_assignments = get_member_assignments()
+
+        # Create a group.
+        new_group = {
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'name': self.getUniqueString(prefix='tlgra')}
+        new_group = self.identity_api.create_group(new_group)
+
+        # Create a project.
+        new_project = {
+            'id': uuid.uuid4().hex,
+            'name': self.getUniqueString(prefix='tlgra'),
+            'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(new_project['id'], new_project)
+
+        # Assign a role to the group.
+        self.assignment_api.create_grant(
+            group_id=new_group['id'], project_id=new_project['id'],
+            role_id=MEMBER_ROLE_ID)
+
+        # List role assignments
+        new_member_assignments = get_member_assignments()
+
+        expected_member_assignments = orig_member_assignments + [{
+            'group_id': new_group['id'], 'project_id': new_project['id'],
+            'role_id': MEMBER_ROLE_ID}]
+        self.assertThat(new_member_assignments,
+                        matchers.Equals(expected_member_assignments))
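+        # NOTE: matchers.Equals on two lists is order-sensitive, so this
+        # comparison assumes the backend returns assignments in a stable
+        # order with the new grant appended last.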
+
+    def test_list_role_assignments_bad_role(self):
+        assignment_list = self.assignment_api.list_role_assignments_for_role(
+            role_id=uuid.uuid4().hex)
+        self.assertEqual([], assignment_list)
+
+    def test_add_duplicate_role_grant(self):
+        roles_ref = self.assignment_api.get_roles_for_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'])
+        self.assertNotIn(self.role_admin['id'], roles_ref)
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id'])
+        self.assertRaises(exception.Conflict,
+                          self.assignment_api.add_role_to_user_and_project,
+                          self.user_foo['id'],
+                          self.tenant_bar['id'],
+                          self.role_admin['id'])
+
+    def test_get_role_by_user_and_project_with_user_in_group(self):
+        """Test for get role by user and project, user was added into a group.
+
+        Test Plan:
+
+        - Create a user, a project and a group, and add the user to the group
+        - Create roles and grant them to the user and project
+        - Check the role list fetched for the user and project is as expected
+
+        """
+        user_ref = {'name': uuid.uuid4().hex,
+                    'domain_id': DEFAULT_DOMAIN_ID,
+                    'password': uuid.uuid4().hex,
+                    'enabled': True}
+        user_ref = self.identity_api.create_user(user_ref)
+
+        project_ref = {'id': uuid.uuid4().hex,
+                       'name': uuid.uuid4().hex,
+                       'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(project_ref['id'], project_ref)
+
+        group = {'name': uuid.uuid4().hex,
+                 'domain_id': DEFAULT_DOMAIN_ID}
+        group_id = self.identity_api.create_group(group)['id']
+        self.identity_api.add_user_to_group(user_ref['id'], group_id)
+
+        role_ref_list = []
+        for _ in range(2):
+            role_ref = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role_ref['id'], role_ref)
+            role_ref_list.append(role_ref)
+
+            self.assignment_api.add_role_to_user_and_project(
+                user_id=user_ref['id'],
+                tenant_id=project_ref['id'],
+                role_id=role_ref['id'])
+
+        role_list = self.assignment_api.get_roles_for_user_and_project(
+            user_id=user_ref['id'],
+            tenant_id=project_ref['id'])
+
+        self.assertEqual(set(role_list),
+                         set([r['id'] for r in role_ref_list]))
+
+    def test_get_role_by_user_and_project(self):
+        roles_ref = self.assignment_api.get_roles_for_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'])
+        self.assertNotIn(self.role_admin['id'], roles_ref)
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id'])
+        roles_ref = self.assignment_api.get_roles_for_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'])
+        self.assertIn(self.role_admin['id'], roles_ref)
+        self.assertNotIn('member', roles_ref)
+
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'], 'member')
+        roles_ref = self.assignment_api.get_roles_for_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'])
+        self.assertIn(self.role_admin['id'], roles_ref)
+        self.assertIn('member', roles_ref)
+
+    def test_get_roles_for_user_and_domain(self):
+        """Test for getting roles for user on a domain.
+
+        Test Plan:
+
+        - Create a domain, with 2 users
+        - Check no roles yet exist
+        - Give user1 two roles on the domain, user2 one role
+        - Get roles for user1 on the domain - make sure we only
+          get back the 2 roles on user1
+        - Delete both roles from user1
+        - Check we get no roles back for user1 on the domain
+
+        """
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        new_user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                     'enabled': True, 'domain_id': new_domain['id']}
+        new_user1 = self.identity_api.create_user(new_user1)
+        new_user2 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                     'enabled': True, 'domain_id': new_domain['id']}
+        new_user2 = self.identity_api.create_user(new_user2)
+        roles_ref = self.assignment_api.list_grants(
+            user_id=new_user1['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+        # Now create the grants (roles are defined in default_fixtures)
+        self.assignment_api.create_grant(user_id=new_user1['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+        self.assignment_api.create_grant(user_id=new_user1['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='other')
+        self.assignment_api.create_grant(user_id=new_user2['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='admin')
+        # Read back the roles for user1 on domain
+        roles_ids = self.assignment_api.get_roles_for_user_and_domain(
+            new_user1['id'], new_domain['id'])
+        self.assertEqual(2, len(roles_ids))
+        self.assertIn(self.role_member['id'], roles_ids)
+        self.assertIn(self.role_other['id'], roles_ids)
+
+        # Now delete both grants for user1
+        self.assignment_api.delete_grant(user_id=new_user1['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+        self.assignment_api.delete_grant(user_id=new_user1['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='other')
+        roles_ref = self.assignment_api.list_grants(
+            user_id=new_user1['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+
+    def test_get_roles_for_user_and_domain_404(self):
+        """Test errors raised when getting roles for user on a domain.
+
+        Test Plan:
+
+        - Check non-existing user gives UserNotFound
+        - Check non-existing domain gives DomainNotFound
+
+        """
+        new_domain = self._get_domain_fixture()
+        new_user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                     'enabled': True, 'domain_id': new_domain['id']}
+        new_user1 = self.identity_api.create_user(new_user1)
+
+        self.assertRaises(exception.UserNotFound,
+                          self.assignment_api.get_roles_for_user_and_domain,
+                          uuid.uuid4().hex,
+                          new_domain['id'])
+
+        self.assertRaises(exception.DomainNotFound,
+                          self.assignment_api.get_roles_for_user_and_domain,
+                          new_user1['id'],
+                          uuid.uuid4().hex)
+
+    def test_get_roles_for_user_and_project_404(self):
+        self.assertRaises(exception.UserNotFound,
+                          self.assignment_api.get_roles_for_user_and_project,
+                          uuid.uuid4().hex,
+                          self.tenant_bar['id'])
+
+        self.assertRaises(exception.ProjectNotFound,
+                          self.assignment_api.get_roles_for_user_and_project,
+                          self.user_foo['id'],
+                          uuid.uuid4().hex)
+
+    def test_add_role_to_user_and_project_404(self):
+        self.assertRaises(exception.ProjectNotFound,
+                          self.assignment_api.add_role_to_user_and_project,
+                          self.user_foo['id'],
+                          uuid.uuid4().hex,
+                          self.role_admin['id'])
+
+        self.assertRaises(exception.RoleNotFound,
+                          self.assignment_api.add_role_to_user_and_project,
+                          self.user_foo['id'],
+                          self.tenant_bar['id'],
+                          uuid.uuid4().hex)
+
+    def test_add_role_to_user_and_project_no_user(self):
+        # If add_role_to_user_and_project is called with a user that does
+        # not exist, no error is raised.
+        user_id_not_exist = uuid.uuid4().hex
+        self.assignment_api.add_role_to_user_and_project(
+            user_id_not_exist, self.tenant_bar['id'], self.role_admin['id'])
+
+    def test_remove_role_from_user_and_project(self):
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'], 'member')
+        self.assignment_api.remove_role_from_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'], 'member')
+        roles_ref = self.assignment_api.get_roles_for_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'])
+        self.assertNotIn('member', roles_ref)
+        self.assertRaises(exception.NotFound,
+                          self.assignment_api.
+                          remove_role_from_user_and_project,
+                          self.user_foo['id'],
+                          self.tenant_bar['id'],
+                          'member')
+
+    def test_get_role_grant_by_user_and_project(self):
+        roles_ref = self.assignment_api.list_grants(
+            user_id=self.user_foo['id'],
+            project_id=self.tenant_bar['id'])
+        self.assertEqual(1, len(roles_ref))
+        self.assignment_api.create_grant(user_id=self.user_foo['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_admin['id'])
+        roles_ref = self.assignment_api.list_grants(
+            user_id=self.user_foo['id'],
+            project_id=self.tenant_bar['id'])
+        self.assertIn(self.role_admin['id'],
+                      [role_ref['id'] for role_ref in roles_ref])
+
+        self.assignment_api.create_grant(user_id=self.user_foo['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            user_id=self.user_foo['id'],
+            project_id=self.tenant_bar['id'])
+
+        roles_ref_ids = [ref['id'] for ref in roles_ref]
+        self.assertIn(self.role_admin['id'], roles_ref_ids)
+        self.assertIn('member', roles_ref_ids)
+
+    def test_remove_role_grant_from_user_and_project(self):
+        self.assignment_api.create_grant(user_id=self.user_foo['id'],
+                                         project_id=self.tenant_baz['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            user_id=self.user_foo['id'],
+            project_id=self.tenant_baz['id'])
+        self.assertDictEqual(roles_ref[0], self.role_member)
+
+        self.assignment_api.delete_grant(user_id=self.user_foo['id'],
+                                         project_id=self.tenant_baz['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            user_id=self.user_foo['id'],
+            project_id=self.tenant_baz['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          user_id=self.user_foo['id'],
+                          project_id=self.tenant_baz['id'],
+                          role_id='member')
+
+    def test_get_role_assignment_by_project_not_found(self):
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.check_grant_role_id,
+                          user_id=self.user_foo['id'],
+                          project_id=self.tenant_baz['id'],
+                          role_id='member')
+
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.check_grant_role_id,
+                          group_id=uuid.uuid4().hex,
+                          project_id=self.tenant_baz['id'],
+                          role_id='member')
+
+    def test_get_role_assignment_by_domain_not_found(self):
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.check_grant_role_id,
+                          user_id=self.user_foo['id'],
+                          domain_id=self.domain_default['id'],
+                          role_id='member')
+
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.check_grant_role_id,
+                          group_id=uuid.uuid4().hex,
+                          domain_id=self.domain_default['id'],
+                          role_id='member')
+
+    def test_del_role_assignment_by_project_not_found(self):
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          user_id=self.user_foo['id'],
+                          project_id=self.tenant_baz['id'],
+                          role_id='member')
+
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          group_id=uuid.uuid4().hex,
+                          project_id=self.tenant_baz['id'],
+                          role_id='member')
+
+    def test_del_role_assignment_by_domain_not_found(self):
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          user_id=self.user_foo['id'],
+                          domain_id=self.domain_default['id'],
+                          role_id='member')
+
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          group_id=uuid.uuid4().hex,
+                          domain_id=self.domain_default['id'],
+                          role_id='member')
+
+    def test_get_and_remove_role_grant_by_group_and_project(self):
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_user = {'name': 'new_user', 'password': 'secret',
+                    'enabled': True, 'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            project_id=self.tenant_bar['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            project_id=self.tenant_bar['id'])
+        self.assertDictEqual(roles_ref[0], self.role_member)
+
+        self.assignment_api.delete_grant(group_id=new_group['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            project_id=self.tenant_bar['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          group_id=new_group['id'],
+                          project_id=self.tenant_bar['id'],
+                          role_id='member')
+
+    def test_get_and_remove_role_grant_by_group_and_domain(self):
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertDictEqual(roles_ref[0], self.role_member)
+
+        self.assignment_api.delete_grant(group_id=new_group['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          group_id=new_group['id'],
+                          domain_id=new_domain['id'],
+                          role_id='member')
+
+    def test_get_and_remove_correct_role_grant_from_a_mix(self):
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        new_project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                       'domain_id': new_domain['id']}
+        self.resource_api.create_project(new_project['id'], new_project)
+        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_group2 = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
+        new_group2 = self.identity_api.create_group(new_group2)
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        new_user2 = {'name': 'new_user2', 'password': uuid.uuid4().hex,
+                     'enabled': True, 'domain_id': new_domain['id']}
+        new_user2 = self.identity_api.create_user(new_user2)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+        # First check we have no grants
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+        # Now add the grant we are going to test for, and some others as
+        # well just to make sure we get back the right one
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+
+        self.assignment_api.create_grant(group_id=new_group2['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id=self.role_admin['id'])
+        self.assignment_api.create_grant(user_id=new_user2['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id=self.role_admin['id'])
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         project_id=new_project['id'],
+                                         role_id=self.role_admin['id'])
+
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertDictEqual(roles_ref[0], self.role_member)
+
+        self.assignment_api.delete_grant(group_id=new_group['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          group_id=new_group['id'],
+                          domain_id=new_domain['id'],
+                          role_id='member')
+
+    def test_get_and_remove_role_grant_by_user_and_domain(self):
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        new_user = {'name': 'new_user', 'password': 'secret',
+                    'enabled': True, 'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        roles_ref = self.assignment_api.list_grants(
+            user_id=new_user['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assignment_api.create_grant(user_id=new_user['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            user_id=new_user['id'],
+            domain_id=new_domain['id'])
+        self.assertDictEqual(roles_ref[0], self.role_member)
+
+        self.assignment_api.delete_grant(user_id=new_user['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            user_id=new_user['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          user_id=new_user['id'],
+                          domain_id=new_domain['id'],
+                          role_id='member')
+
+    def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
+        group1_domain1_role = {'id': uuid.uuid4().hex,
+                               'name': uuid.uuid4().hex}
+        self.role_api.create_role(group1_domain1_role['id'],
+                                  group1_domain1_role)
+        group1_domain2_role = {'id': uuid.uuid4().hex,
+                               'name': uuid.uuid4().hex}
+        self.role_api.create_role(group1_domain2_role['id'],
+                                  group1_domain2_role)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        group1 = {'domain_id': domain1['id'], 'name': uuid.uuid4().hex}
+        group1 = self.identity_api.create_group(group1)
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            domain_id=domain1['id'])
+        self.assertEqual(0, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            domain_id=domain2['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=group1_domain1_role['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=group1_domain2_role['id'])
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            domain_id=domain1['id'])
+        self.assertDictEqual(roles_ref[0], group1_domain1_role)
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            domain_id=domain2['id'])
+        self.assertDictEqual(roles_ref[0], group1_domain2_role)
+
+        self.assignment_api.delete_grant(group_id=group1['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=group1_domain2_role['id'])
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            domain_id=domain2['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          group_id=group1['id'],
+                          domain_id=domain2['id'],
+                          role_id=group1_domain2_role['id'])
+
+    def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
+        user1_domain1_role = {'id': uuid.uuid4().hex,
+                              'name': uuid.uuid4().hex}
+        self.role_api.create_role(user1_domain1_role['id'], user1_domain1_role)
+        user1_domain2_role = {'id': uuid.uuid4().hex,
+                              'name': uuid.uuid4().hex}
+        self.role_api.create_role(user1_domain2_role['id'], user1_domain2_role)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            domain_id=domain1['id'])
+        self.assertEqual(0, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            domain_id=domain2['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=user1_domain1_role['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=user1_domain2_role['id'])
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            domain_id=domain1['id'])
+        self.assertDictEqual(roles_ref[0], user1_domain1_role)
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            domain_id=domain2['id'])
+        self.assertDictEqual(roles_ref[0], user1_domain2_role)
+
+        self.assignment_api.delete_grant(user_id=user1['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=user1_domain2_role['id'])
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            domain_id=domain2['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          user_id=user1['id'],
+                          domain_id=domain2['id'],
+                          role_id=user1_domain2_role['id'])
+
+    def test_role_grant_by_group_and_cross_domain_project(self):
+        role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role1['id'], role1)
+        role2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role2['id'], role2)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group1 = self.identity_api.create_group(group1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain2['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            project_id=project1['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role2['id'])
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            project_id=project1['id'])
+
+        roles_ref_ids = [ref['id'] for ref in roles_ref]
+        self.assertIn(role1['id'], roles_ref_ids)
+        self.assertIn(role2['id'], roles_ref_ids)
+
+        self.assignment_api.delete_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role1['id'])
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            project_id=project1['id'])
+        self.assertEqual(1, len(roles_ref))
+        self.assertDictEqual(roles_ref[0], role2)
+
+    def test_role_grant_by_user_and_cross_domain_project(self):
+        role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role1['id'], role1)
+        role2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role2['id'], role2)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain2['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role2['id'])
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+
+        roles_ref_ids = [ref['id'] for ref in roles_ref]
+        self.assertIn(role1['id'], roles_ref_ids)
+        self.assertIn(role2['id'], roles_ref_ids)
+
+        self.assignment_api.delete_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role1['id'])
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(1, len(roles_ref))
+        self.assertDictEqual(roles_ref[0], role2)
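+        # NOTE: together with the group variant above, this shows that a
+        # grant may span domains - the user (or group) and the target
+        # project can live in different domains, since an assignment only
+        # references their IDs.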
+
+    def test_delete_user_grant_no_user(self):
+        # Can delete a grant where the user doesn't exist.
+        role_id = uuid.uuid4().hex
+        role = {'id': role_id, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role_id, role)
+
+        user_id = uuid.uuid4().hex
+
+        self.assignment_api.create_grant(role_id, user_id=user_id,
+                                         project_id=self.tenant_bar['id'])
+
+        self.assignment_api.delete_grant(role_id, user_id=user_id,
+                                         project_id=self.tenant_bar['id'])
+
+    def test_delete_group_grant_no_group(self):
+        # Can delete a grant where the group doesn't exist.
+        role_id = uuid.uuid4().hex
+        role = {'id': role_id, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role_id, role)
+
+        group_id = uuid.uuid4().hex
+
+        self.assignment_api.create_grant(role_id, group_id=group_id,
+                                         project_id=self.tenant_bar['id'])
+
+        self.assignment_api.delete_grant(role_id, group_id=group_id,
+                                         project_id=self.tenant_bar['id'])
+
+    def test_grant_crud_throws_exception_if_invalid_role(self):
+        """Ensure RoleNotFound thrown if role does not exist."""
+
+        def assert_role_not_found_exception(f, **kwargs):
+            self.assertRaises(exception.RoleNotFound, f,
+                              role_id=uuid.uuid4().hex, **kwargs)
+
+        user = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID,
+                'password': uuid.uuid4().hex, 'enabled': True}
+        user_resp = self.identity_api.create_user(user)
+        group = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID,
+                 'enabled': True}
+        group_resp = self.identity_api.create_group(group)
+        project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                   'domain_id': DEFAULT_DOMAIN_ID}
+        project_resp = self.resource_api.create_project(project['id'], project)
+
+        for manager_call in [self.assignment_api.create_grant,
+                             self.assignment_api.get_grant,
+                             self.assignment_api.delete_grant]:
+            assert_role_not_found_exception(
+                manager_call,
+                user_id=user_resp['id'], project_id=project_resp['id'])
+            assert_role_not_found_exception(
+                manager_call,
+                group_id=group_resp['id'], project_id=project_resp['id'])
+            assert_role_not_found_exception(
+                manager_call,
+                user_id=user_resp['id'], domain_id=DEFAULT_DOMAIN_ID)
+            assert_role_not_found_exception(
+                manager_call,
+                group_id=group_resp['id'], domain_id=DEFAULT_DOMAIN_ID)
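+        # The four actor/target combinations above (user/group on
+        # project/domain) confirm that the role id is validated for every
+        # grant CRUD call, regardless of which combination is used.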
+
+    def test_multi_role_grant_by_user_group_on_project_domain(self):
+        role_list = []
+        for _ in range(10):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group1 = self.identity_api.create_group(group1)
+        group2 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group2 = self.identity_api.create_group(group2)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group1['id'])
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group2['id'])
+
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[0]['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[1]['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[2]['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[3]['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[4]['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[5]['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[6]['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[7]['id'])
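+        # Read the grants back for each actor/target pair, checking
+        # that each pair has exactly the two roles assigned above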
+        roles_ref = self.assignment_api.list_grants(user_id=user1['id'],
+                                                    domain_id=domain1['id'])
+        self.assertEqual(2, len(roles_ref))
+        self.assertIn(role_list[0], roles_ref)
+        self.assertIn(role_list[1], roles_ref)
+        roles_ref = self.assignment_api.list_grants(group_id=group1['id'],
+                                                    domain_id=domain1['id'])
+        self.assertEqual(2, len(roles_ref))
+        self.assertIn(role_list[2], roles_ref)
+        self.assertIn(role_list[3], roles_ref)
+        roles_ref = self.assignment_api.list_grants(user_id=user1['id'],
+                                                    project_id=project1['id'])
+        self.assertEqual(2, len(roles_ref))
+        self.assertIn(role_list[4], roles_ref)
+        self.assertIn(role_list[5], roles_ref)
+        roles_ref = self.assignment_api.list_grants(group_id=group1['id'],
+                                                    project_id=project1['id'])
+        self.assertEqual(2, len(roles_ref))
+        self.assertIn(role_list[6], roles_ref)
+        self.assertIn(role_list[7], roles_ref)
+
+        # Now test the alternate way of getting back lists of grants,
+        # where user and group roles are combined.  These should match
+        # the above results.
+        combined_list = self.assignment_api.get_roles_for_user_and_project(
+            user1['id'], project1['id'])
+        self.assertEqual(4, len(combined_list))
+        self.assertIn(role_list[4]['id'], combined_list)
+        self.assertIn(role_list[5]['id'], combined_list)
+        self.assertIn(role_list[6]['id'], combined_list)
+        self.assertIn(role_list[7]['id'], combined_list)
+
+        combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
+            user1['id'], domain1['id'])
+        self.assertEqual(4, len(combined_role_list))
+        self.assertIn(role_list[0]['id'], combined_role_list)
+        self.assertIn(role_list[1]['id'], combined_role_list)
+        self.assertIn(role_list[2]['id'], combined_role_list)
+        self.assertIn(role_list[3]['id'], combined_role_list)
+
+    def test_multi_group_grants_on_project_domain(self):
+        """Test multiple group roles for user on project and domain.
+
+        Test Plan:
+
+        - Create 6 roles
+        - Create a domain, with a project, user and two groups
+        - Make the user a member of both groups
+        - Check that no role grants exist yet
+        - Assign a role to the user and to each group, on both the
+          project and the domain
+        - Get the list of effective roles for the user on both the
+          project and the domain, checking that we get back the correct
+          three roles in each case
+
+        """
+        role_list = []
+        for _ in range(6):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group1 = self.identity_api.create_group(group1)
+        group2 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group2 = self.identity_api.create_group(group2)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group1['id'])
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group2['id'])
+
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[0]['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[1]['id'])
+        self.assignment_api.create_grant(group_id=group2['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[2]['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[3]['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[4]['id'])
+        self.assignment_api.create_grant(group_id=group2['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[5]['id'])
+
+        # Read back the roles, ensuring we get the correct 3 roles for
+        # both the project and the domain
+        combined_list = self.assignment_api.get_roles_for_user_and_project(
+            user1['id'], project1['id'])
+        self.assertEqual(3, len(combined_list))
+        self.assertIn(role_list[3]['id'], combined_list)
+        self.assertIn(role_list[4]['id'], combined_list)
+        self.assertIn(role_list[5]['id'], combined_list)
+
+        combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
+            user1['id'], domain1['id'])
+        self.assertEqual(3, len(combined_role_list))
+        self.assertIn(role_list[0]['id'], combined_role_list)
+        self.assertIn(role_list[1]['id'], combined_role_list)
+        self.assertIn(role_list[2]['id'], combined_role_list)
+
+    def test_delete_role_with_user_and_group_grants(self):
+        role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role1['id'], role1)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group1 = self.identity_api.create_group(group1)
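+        # Grant the same role to the user and the group on both the
+        # project and the domain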
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role1['id'])
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(1, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            project_id=project1['id'])
+        self.assertEqual(1, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            domain_id=domain1['id'])
+        self.assertEqual(1, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            domain_id=domain1['id'])
+        self.assertEqual(1, len(roles_ref))
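+        # Deleting the role should also remove all four grants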
+        self.role_api.delete_role(role1['id'])
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(0, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            project_id=project1['id'])
+        self.assertEqual(0, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            domain_id=domain1['id'])
+        self.assertEqual(0, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            domain_id=domain1['id'])
+        self.assertEqual(0, len(roles_ref))
+
+    def test_delete_user_with_group_project_domain_links(self):
+        role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role1['id'], role1)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group1 = self.identity_api.create_group(group1)
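+        # Give the user direct grants on the project and domain, plus
+        # a group membership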
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role1['id'])
+        self.identity_api.add_user_to_group(user_id=user1['id'],
+                                            group_id=group1['id'])
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(1, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            domain_id=domain1['id'])
+        self.assertEqual(1, len(roles_ref))
+        self.identity_api.check_user_in_group(
+            user_id=user1['id'],
+            group_id=group1['id'])
+        self.identity_api.delete_user(user1['id'])
+        self.assertRaises(exception.NotFound,
+                          self.identity_api.check_user_in_group,
+                          user1['id'],
+                          group1['id'])
+
+    def test_delete_group_with_user_project_domain_links(self):
+        role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role1['id'], role1)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group1 = self.identity_api.create_group(group1)
+
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role1['id'])
+        self.identity_api.add_user_to_group(user_id=user1['id'],
+                                            group_id=group1['id'])
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            project_id=project1['id'])
+        self.assertEqual(1, len(roles_ref))
+        roles_ref = self.assignment_api.list_grants(
+            group_id=group1['id'],
+            domain_id=domain1['id'])
+        self.assertEqual(1, len(roles_ref))
+        self.identity_api.check_user_in_group(
+            user_id=user1['id'],
+            group_id=group1['id'])
+        self.identity_api.delete_group(group1['id'])
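+        # The user should still exist after its group is deleted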
+        self.identity_api.get_user(user1['id'])
+
+    def test_delete_domain_with_user_group_project_links(self):
+        # TODO(chungg): add test case once the expected behaviour is defined
+        pass
+
+    def test_add_user_to_project(self):
+        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+                                                self.user_foo['id'])
+        tenants = self.assignment_api.list_projects_for_user(
+            self.user_foo['id'])
+        self.assertIn(self.tenant_baz, tenants)
+
+    def test_add_user_to_project_missing_default_role(self):
+        self.role_api.delete_role(CONF.member_role_id)
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.get_role,
+                          CONF.member_role_id)
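+        # add_user_to_project should recreate the missing default role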
+        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+                                                self.user_foo['id'])
+        tenants = (
+            self.assignment_api.list_projects_for_user(self.user_foo['id']))
+        self.assertIn(self.tenant_baz, tenants)
+        default_role = self.role_api.get_role(CONF.member_role_id)
+        self.assertIsNotNone(default_role)
+
+    def test_add_user_to_project_404(self):
+        self.assertRaises(exception.ProjectNotFound,
+                          self.assignment_api.add_user_to_project,
+                          uuid.uuid4().hex,
+                          self.user_foo['id'])
+
+    def test_add_user_to_project_no_user(self):
+        # If add_user_to_project is called with a user that doesn't
+        # exist, no error is raised.
+        user_id_not_exist = uuid.uuid4().hex
+        self.assignment_api.add_user_to_project(self.tenant_bar['id'],
+                                                user_id_not_exist)
+
+    def test_remove_user_from_project(self):
+        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+                                                self.user_foo['id'])
+        self.assignment_api.remove_user_from_project(self.tenant_baz['id'],
+                                                     self.user_foo['id'])
+        tenants = self.assignment_api.list_projects_for_user(
+            self.user_foo['id'])
+        self.assertNotIn(self.tenant_baz, tenants)
+
+    def test_remove_user_from_project_race_delete_role(self):
+        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+                                                self.user_foo['id'])
+        self.assignment_api.add_role_to_user_and_project(
+            tenant_id=self.tenant_baz['id'],
+            user_id=self.user_foo['id'],
+            role_id=self.role_other['id'])
+
+        # Simulate a race condition: delete the role after
+        # get_roles_for_user_and_project() has been called in
+        # remove_user_from_project().
+        roles = self.assignment_api.get_roles_for_user_and_project(
+            self.user_foo['id'], self.tenant_baz['id'])
+        self.role_api.delete_role(self.role_other['id'])
+        self.assignment_api.get_roles_for_user_and_project = mock.Mock(
+            return_value=roles)
+        self.assignment_api.remove_user_from_project(self.tenant_baz['id'],
+                                                     self.user_foo['id'])
+        tenants = self.assignment_api.list_projects_for_user(
+            self.user_foo['id'])
+        self.assertNotIn(self.tenant_baz, tenants)
+
+    def test_remove_user_from_project_404(self):
+        self.assertRaises(exception.ProjectNotFound,
+                          self.assignment_api.remove_user_from_project,
+                          uuid.uuid4().hex,
+                          self.user_foo['id'])
+
+        self.assertRaises(exception.UserNotFound,
+                          self.assignment_api.remove_user_from_project,
+                          self.tenant_bar['id'],
+                          uuid.uuid4().hex)
+
+        self.assertRaises(exception.NotFound,
+                          self.assignment_api.remove_user_from_project,
+                          self.tenant_baz['id'],
+                          self.user_foo['id'])
+
+    def test_list_user_project_ids_404(self):
+        self.assertRaises(exception.UserNotFound,
+                          self.assignment_api.list_projects_for_user,
+                          uuid.uuid4().hex)
+
+    def test_update_project_404(self):
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.update_project,
+                          uuid.uuid4().hex,
+                          dict())
+
+    def test_delete_project_404(self):
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.delete_project,
+                          uuid.uuid4().hex)
+
+    def test_update_user_404(self):
+        user_id = uuid.uuid4().hex
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.update_user,
+                          user_id,
+                          {'id': user_id,
+                           'domain_id': DEFAULT_DOMAIN_ID})
+
+    def test_delete_user_with_project_association(self):
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': uuid.uuid4().hex}
+        user = self.identity_api.create_user(user)
+        self.assignment_api.add_user_to_project(self.tenant_bar['id'],
+                                                user['id'])
+        self.identity_api.delete_user(user['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.assignment_api.list_projects_for_user,
+                          user['id'])
+
+    def test_delete_user_with_project_roles(self):
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': uuid.uuid4().hex}
+        user = self.identity_api.create_user(user)
+        self.assignment_api.add_role_to_user_and_project(
+            user['id'],
+            self.tenant_bar['id'],
+            self.role_member['id'])
+        self.identity_api.delete_user(user['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.assignment_api.list_projects_for_user,
+                          user['id'])
+
+    def test_delete_user_404(self):
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.delete_user,
+                          uuid.uuid4().hex)
+
+    def test_delete_role_404(self):
+        self.assertRaises(exception.RoleNotFound,
+                          self.role_api.delete_role,
+                          uuid.uuid4().hex)
+
+    def test_create_update_delete_unicode_project(self):
+        unicode_project_name = u'name \u540d\u5b57'
+        project = {'id': uuid.uuid4().hex,
+                   'name': unicode_project_name,
+                   'description': uuid.uuid4().hex,
+                   'domain_id': CONF.identity.default_domain_id}
+        self.resource_api.create_project(project['id'], project)
+        self.resource_api.update_project(project['id'], project)
+        self.resource_api.delete_project(project['id'])
+
+    def test_create_project_with_no_enabled_field(self):
+        ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex.lower(),
+            'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(ref['id'], ref)
+
+        project = self.resource_api.get_project(ref['id'])
+        self.assertIs(project['enabled'], True)
+
+    def test_create_project_long_name_fails(self):
+        tenant = {'id': 'fake1', 'name': 'a' * 65,
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.create_project,
+                          tenant['id'],
+                          tenant)
+
+    def test_create_project_blank_name_fails(self):
+        tenant = {'id': 'fake1', 'name': '',
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.create_project,
+                          tenant['id'],
+                          tenant)
+
+    def test_create_project_invalid_name_fails(self):
+        tenant = {'id': 'fake1', 'name': None,
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.create_project,
+                          tenant['id'],
+                          tenant)
+        tenant = {'id': 'fake1', 'name': 123,
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.create_project,
+                          tenant['id'],
+                          tenant)
+
+    def test_update_project_blank_name_fails(self):
+        tenant = {'id': 'fake1', 'name': 'fake1',
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project('fake1', tenant)
+        tenant['name'] = ''
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.update_project,
+                          tenant['id'],
+                          tenant)
+
+    def test_update_project_long_name_fails(self):
+        tenant = {'id': 'fake1', 'name': 'fake1',
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project('fake1', tenant)
+        tenant['name'] = 'a' * 65
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.update_project,
+                          tenant['id'],
+                          tenant)
+
+    def test_update_project_invalid_name_fails(self):
+        tenant = {'id': 'fake1', 'name': 'fake1',
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project('fake1', tenant)
+        tenant['name'] = None
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.update_project,
+                          tenant['id'],
+                          tenant)
+
+        tenant['name'] = 123
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.update_project,
+                          tenant['id'],
+                          tenant)
+
+    def test_create_user_long_name_fails(self):
+        user = {'name': 'a' * 256,
+                'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.create_user,
+                          user)
+
+    def test_create_user_blank_name_fails(self):
+        user = {'name': '',
+                'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.create_user,
+                          user)
+
+    def test_create_user_missed_password(self):
+        user = {'name': 'fake1',
+                'domain_id': DEFAULT_DOMAIN_ID}
+        user = self.identity_api.create_user(user)
+        self.identity_api.get_user(user['id'])
+        # Make sure the user is not allowed to log in
+        # with a password that is an empty string or None
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=user['id'],
+                          password='')
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=user['id'],
+                          password=None)
+
+    def test_create_user_none_password(self):
+        user = {'name': 'fake1', 'password': None,
+                'domain_id': DEFAULT_DOMAIN_ID}
+        user = self.identity_api.create_user(user)
+        self.identity_api.get_user(user['id'])
+        # Make sure the user is not allowed to log in
+        # with a password that is an empty string or None
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=user['id'],
+                          password='')
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=user['id'],
+                          password=None)
+
+    def test_create_user_invalid_name_fails(self):
+        user = {'name': None,
+                'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.create_user,
+                          user)
+
+        user = {'name': 123,
+                'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.create_user,
+                          user)
+
+    def test_update_project_invalid_enabled_type_string(self):
+        project = {'id': uuid.uuid4().hex,
+                   'name': uuid.uuid4().hex,
+                   'enabled': True,
+                   'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(project['id'], project)
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertEqual(True, project_ref['enabled'])
+
+        # Strings are not valid boolean values
+        project['enabled'] = "false"
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.update_project,
+                          project['id'],
+                          project)
+
+    def test_create_project_invalid_enabled_type_string(self):
+        project = {'id': uuid.uuid4().hex,
+                   'name': uuid.uuid4().hex,
+                   'domain_id': DEFAULT_DOMAIN_ID,
+                   # invalid string value
+                   'enabled': "true"}
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.create_project,
+                          project['id'],
+                          project)
+
+    def test_create_user_invalid_enabled_type_string(self):
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': uuid.uuid4().hex,
+                # invalid string value
+                'enabled': "true"}
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.create_user,
+                          user)
+
+    def test_update_user_long_name_fails(self):
+        user = {'name': 'fake1',
+                'domain_id': DEFAULT_DOMAIN_ID}
+        user = self.identity_api.create_user(user)
+        user['name'] = 'a' * 256
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.update_user,
+                          user['id'],
+                          user)
+
+    def test_update_user_blank_name_fails(self):
+        user = {'name': 'fake1',
+                'domain_id': DEFAULT_DOMAIN_ID}
+        user = self.identity_api.create_user(user)
+        user['name'] = ''
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.update_user,
+                          user['id'],
+                          user)
+
+    def test_update_user_invalid_name_fails(self):
+        user = {'name': 'fake1',
+                'domain_id': DEFAULT_DOMAIN_ID}
+        user = self.identity_api.create_user(user)
+
+        user['name'] = None
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.update_user,
+                          user['id'],
+                          user)
+
+        user['name'] = 123
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.update_user,
+                          user['id'],
+                          user)
+
+    def test_list_users(self):
+        users = self.identity_api.list_users(
+            domain_scope=self._set_domain_scope(DEFAULT_DOMAIN_ID))
+        self.assertEqual(len(default_fixtures.USERS), len(users))
+        user_ids = set(user['id'] for user in users)
+        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
+                                for user in default_fixtures.USERS)
+        for user_ref in users:
+            self.assertNotIn('password', user_ref)
+        self.assertEqual(expected_user_ids, user_ids)
+
+    def test_list_groups(self):
+        group1 = {
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'name': uuid.uuid4().hex}
+        group2 = {
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'name': uuid.uuid4().hex}
+        group1 = self.identity_api.create_group(group1)
+        group2 = self.identity_api.create_group(group2)
+        groups = self.identity_api.list_groups(
+            domain_scope=self._set_domain_scope(DEFAULT_DOMAIN_ID))
+        self.assertEqual(2, len(groups))
+        group_ids = []
+        for group in groups:
+            group_ids.append(group.get('id'))
+        self.assertIn(group1['id'], group_ids)
+        self.assertIn(group2['id'], group_ids)
+
+    def test_list_domains(self):
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        self.resource_api.create_domain(domain2['id'], domain2)
+        domains = self.resource_api.list_domains()
+        self.assertEqual(3, len(domains))
+        domain_ids = []
+        for domain in domains:
+            domain_ids.append(domain.get('id'))
+        self.assertIn(DEFAULT_DOMAIN_ID, domain_ids)
+        self.assertIn(domain1['id'], domain_ids)
+        self.assertIn(domain2['id'], domain_ids)
+
+    def test_list_projects(self):
+        projects = self.resource_api.list_projects()
+        self.assertEqual(4, len(projects))
+        project_ids = []
+        for project in projects:
+            project_ids.append(project.get('id'))
+        self.assertIn(self.tenant_bar['id'], project_ids)
+        self.assertIn(self.tenant_baz['id'], project_ids)
+
+    def test_list_projects_with_multiple_filters(self):
+        # Create a project
+        project = {'id': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID,
+                   'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex,
+                   'enabled': True, 'parent_id': None}
+        self.resource_api.create_project(project['id'], project)
+
+        # Build driver hints with the project's name and a
+        # nonexistent description
+        hints = driver_hints.Hints()
+        hints.add_filter('name', project['name'])
+        hints.add_filter('description', uuid.uuid4().hex)
+
+        # Retrieve projects based on hints and check that an empty
+        # list is returned
+        projects = self.resource_api.list_projects(hints)
+        self.assertEqual([], projects)
+
+        # Build correct driver hints
+        hints = driver_hints.Hints()
+        hints.add_filter('name', project['name'])
+        hints.add_filter('description', project['description'])
+
+        # Retrieve projects based on hints
+        projects = self.resource_api.list_projects(hints)
+
+        # Check that the returned list contains only the created project
+        self.assertEqual(1, len(projects))
+        self.assertEqual(project, projects[0])
+
+    def test_list_projects_for_domain(self):
+        project_ids = ([x['id'] for x in
+                       self.resource_api.list_projects_in_domain(
+                           DEFAULT_DOMAIN_ID)])
+        self.assertEqual(4, len(project_ids))
+        self.assertIn(self.tenant_bar['id'], project_ids)
+        self.assertIn(self.tenant_baz['id'], project_ids)
+        self.assertIn(self.tenant_mtu['id'], project_ids)
+        self.assertIn(self.tenant_service['id'], project_ids)
+
+    @tests.skip_if_no_multiple_domains_support
+    def test_list_projects_for_alternate_domain(self):
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project2['id'], project2)
+        project_ids = ([x['id'] for x in
+                       self.resource_api.list_projects_in_domain(
+                           domain1['id'])])
+        self.assertEqual(2, len(project_ids))
+        self.assertIn(project1['id'], project_ids)
+        self.assertIn(project2['id'], project_ids)
+
+    def _create_projects_hierarchy(self, hierarchy_size=2,
+                                   domain_id=DEFAULT_DOMAIN_ID):
+        """Creates a project hierarchy with specified size.
+
+        :param hierarchy_size: the desired hierarchy size, default is 2 -
+                               a project with one child.
+        :param domain_id: domain where the projects hierarchy will be created.
+
+        :returns projects: a list of the projects in the created hierarchy.
+
+        """
+        project_id = uuid.uuid4().hex
+        project = {'id': project_id,
+                   'description': '',
+                   'domain_id': domain_id,
+                   'enabled': True,
+                   'name': uuid.uuid4().hex,
+                   'parent_id': None}
+        self.resource_api.create_project(project_id, project)
+
+        projects = [project]
+        for i in range(1, hierarchy_size):
+            new_project = {'id': uuid.uuid4().hex,
+                           'description': '',
+                           'domain_id': domain_id,
+                           'enabled': True,
+                           'name': uuid.uuid4().hex,
+                           'parent_id': project_id}
+            self.resource_api.create_project(new_project['id'], new_project)
+            projects.append(new_project)
+            project_id = new_project['id']
+
+        return projects
+
+    def test_check_leaf_projects(self):
+        projects_hierarchy = self._create_projects_hierarchy()
+        root_project = projects_hierarchy[0]
+        leaf_project = projects_hierarchy[1]
+
+        self.assertFalse(self.resource_api.is_leaf_project(
+            root_project['id']))
+        self.assertTrue(self.resource_api.is_leaf_project(
+            leaf_project['id']))
+
+        # Delete leaf_project
+        self.resource_api.delete_project(leaf_project['id'])
+
+        # Now, root_project should be leaf
+        self.assertTrue(self.resource_api.is_leaf_project(
+            root_project['id']))
+
+    def test_list_projects_in_subtree(self):
+        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+        project1 = projects_hierarchy[0]
+        project2 = projects_hierarchy[1]
+        project3 = projects_hierarchy[2]
+        project4 = {'id': uuid.uuid4().hex,
+                    'description': '',
+                    'domain_id': DEFAULT_DOMAIN_ID,
+                    'enabled': True,
+                    'name': uuid.uuid4().hex,
+                    'parent_id': project2['id']}
+        self.resource_api.create_project(project4['id'], project4)
+
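+        # The hierarchy is now: project1 -> project2 -> {project3, project4}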
+        subtree = self.resource_api.list_projects_in_subtree(project1['id'])
+        self.assertEqual(3, len(subtree))
+        self.assertIn(project2, subtree)
+        self.assertIn(project3, subtree)
+        self.assertIn(project4, subtree)
+
+        subtree = self.resource_api.list_projects_in_subtree(project2['id'])
+        self.assertEqual(2, len(subtree))
+        self.assertIn(project3, subtree)
+        self.assertIn(project4, subtree)
+
+        subtree = self.resource_api.list_projects_in_subtree(project3['id'])
+        self.assertEqual(0, len(subtree))
+
+    def test_list_project_parents(self):
+        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+        project1 = projects_hierarchy[0]
+        project2 = projects_hierarchy[1]
+        project3 = projects_hierarchy[2]
+        project4 = {'id': uuid.uuid4().hex,
+                    'description': '',
+                    'domain_id': DEFAULT_DOMAIN_ID,
+                    'enabled': True,
+                    'name': uuid.uuid4().hex,
+                    'parent_id': project2['id']}
+        self.resource_api.create_project(project4['id'], project4)
+
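+        # project3 and project4 are siblings, so they share the same parents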
+        parents1 = self.resource_api.list_project_parents(project3['id'])
+        self.assertEqual(2, len(parents1))
+        self.assertIn(project1, parents1)
+        self.assertIn(project2, parents1)
+
+        parents2 = self.resource_api.list_project_parents(project4['id'])
+        self.assertEqual(parents1, parents2)
+
+        parents = self.resource_api.list_project_parents(project1['id'])
+        self.assertEqual(0, len(parents))
+
+    def test_delete_project_with_role_assignments(self):
+        tenant = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(tenant['id'], tenant)
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], tenant['id'], 'member')
+        self.resource_api.delete_project(tenant['id'])
+        self.assertRaises(exception.NotFound,
+                          self.resource_api.get_project,
+                          tenant['id'])
+
+    def test_delete_role_check_role_grant(self):
+        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        alt_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role['id'], role)
+        self.role_api.create_role(alt_role['id'], alt_role)
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'], role['id'])
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'], alt_role['id'])
+        self.role_api.delete_role(role['id'])
+        roles_ref = self.assignment_api.get_roles_for_user_and_project(
+            self.user_foo['id'], self.tenant_bar['id'])
+        self.assertNotIn(role['id'], roles_ref)
+        self.assertIn(alt_role['id'], roles_ref)
+
+    def test_create_project_doesnt_modify_passed_in_dict(self):
+        new_project = {'id': 'tenant_id', 'name': uuid.uuid4().hex,
+                       'domain_id': DEFAULT_DOMAIN_ID}
+        original_project = new_project.copy()
+        self.resource_api.create_project('tenant_id', new_project)
+        self.assertDictEqual(original_project, new_project)
+
+    def test_create_user_doesnt_modify_passed_in_dict(self):
+        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                    'domain_id': DEFAULT_DOMAIN_ID}
+        original_user = new_user.copy()
+        self.identity_api.create_user(new_user)
+        self.assertDictEqual(original_user, new_user)
+
+    def test_update_user_enable(self):
+        user = {'name': 'fake1', 'enabled': True,
+                'domain_id': DEFAULT_DOMAIN_ID}
+        user = self.identity_api.create_user(user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(True, user_ref['enabled'])
+
+        user['enabled'] = False
+        self.identity_api.update_user(user['id'], user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(user['enabled'], user_ref['enabled'])
+
+        # If not present, the enabled field should not be updated
+        del user['enabled']
+        self.identity_api.update_user(user['id'], user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(False, user_ref['enabled'])
+
+        user['enabled'] = True
+        self.identity_api.update_user(user['id'], user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(user['enabled'], user_ref['enabled'])
+
+        del user['enabled']
+        self.identity_api.update_user(user['id'], user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(True, user_ref['enabled'])
+
+        # Integers are valid Python booleans. Explicitly test it.
+        user['enabled'] = 0
+        self.identity_api.update_user(user['id'], user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(False, user_ref['enabled'])
+
+        # Any integer other than 0 is interpreted as True
+        user['enabled'] = -42
+        self.identity_api.update_user(user['id'], user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(True, user_ref['enabled'])
+
+    def test_update_user_name(self):
+        user = {'name': uuid.uuid4().hex,
+                'enabled': True,
+                'domain_id': DEFAULT_DOMAIN_ID}
+        user = self.identity_api.create_user(user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(user['name'], user_ref['name'])
+
+        changed_name = user_ref['name'] + '_changed'
+        user_ref['name'] = changed_name
+        updated_user = self.identity_api.update_user(user_ref['id'], user_ref)
+
+        # NOTE(dstanek): the SQL backend adds an 'extra' field containing a
+        #                dictionary of the extra fields in addition to the
+        #                fields in the object. For the details see:
+        #                SqlIdentity.test_update_project_returns_extra
+        updated_user.pop('extra', None)
+
+        self.assertDictEqual(user_ref, updated_user)
+
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertEqual(changed_name, user_ref['name'])
+
+    def test_update_user_enable_fails(self):
+        user = {'name': 'fake1', 'enabled': True,
+                'domain_id': DEFAULT_DOMAIN_ID}
+        user = self.identity_api.create_user(user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(True, user_ref['enabled'])
+
+        # Strings are not valid boolean values
+        user['enabled'] = "false"
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.update_user,
+                          user['id'],
+                          user)
+
+    def test_update_project_enable(self):
+        tenant = {'id': 'fake1', 'name': 'fake1', 'enabled': True,
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project('fake1', tenant)
+        tenant_ref = self.resource_api.get_project('fake1')
+        self.assertEqual(True, tenant_ref['enabled'])
+
+        tenant['enabled'] = False
+        self.resource_api.update_project('fake1', tenant)
+        tenant_ref = self.resource_api.get_project('fake1')
+        self.assertEqual(tenant['enabled'], tenant_ref['enabled'])
+
+        # If not present, the enabled field should not be updated
+        del tenant['enabled']
+        self.resource_api.update_project('fake1', tenant)
+        tenant_ref = self.resource_api.get_project('fake1')
+        self.assertEqual(False, tenant_ref['enabled'])
+
+        tenant['enabled'] = True
+        self.resource_api.update_project('fake1', tenant)
+        tenant_ref = self.resource_api.get_project('fake1')
+        self.assertEqual(tenant['enabled'], tenant_ref['enabled'])
+
+        del tenant['enabled']
+        self.resource_api.update_project('fake1', tenant)
+        tenant_ref = self.resource_api.get_project('fake1')
+        self.assertEqual(True, tenant_ref['enabled'])
+
+    def test_add_user_to_group(self):
+        domain = self._get_domain_fixture()
+        new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+        groups = self.identity_api.list_groups_for_user(new_user['id'])
+
+        self.assertIn(new_group['id'], [x['id'] for x in groups])
+
+    def test_add_user_to_group_404(self):
+        domain = self._get_domain_fixture()
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.add_user_to_group,
+                          new_user['id'],
+                          uuid.uuid4().hex)
+
+        new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.add_user_to_group,
+                          uuid.uuid4().hex,
+                          new_group['id'])
+
+        self.assertRaises(exception.NotFound,
+                          self.identity_api.add_user_to_group,
+                          uuid.uuid4().hex,
+                          uuid.uuid4().hex)
+
+    def test_check_user_in_group(self):
+        domain = self._get_domain_fixture()
+        new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+        self.identity_api.check_user_in_group(new_user['id'], new_group['id'])
+
+    def test_create_invalid_domain_fails(self):
+        new_group = {'domain_id': "doesnotexist", 'name': uuid.uuid4().hex}
+        self.assertRaises(exception.DomainNotFound,
+                          self.identity_api.create_group,
+                          new_group)
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': "doesnotexist"}
+        self.assertRaises(exception.DomainNotFound,
+                          self.identity_api.create_user,
+                          new_user)
+
+    def test_check_user_not_in_group(self):
+        new_group = {
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': DEFAULT_DOMAIN_ID}
+        new_user = self.identity_api.create_user(new_user)
+
+        self.assertRaises(exception.NotFound,
+                          self.identity_api.check_user_in_group,
+                          new_user['id'],
+                          new_group['id'])
+
+    def test_check_user_in_group_404(self):
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': DEFAULT_DOMAIN_ID}
+        new_user = self.identity_api.create_user(new_user)
+
+        new_group = {
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.check_user_in_group,
+                          uuid.uuid4().hex,
+                          new_group['id'])
+
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.check_user_in_group,
+                          new_user['id'],
+                          uuid.uuid4().hex)
+
+        self.assertRaises(exception.NotFound,
+                          self.identity_api.check_user_in_group,
+                          uuid.uuid4().hex,
+                          uuid.uuid4().hex)
+
+    def test_list_users_in_group(self):
+        domain = self._get_domain_fixture()
+        new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        # Make sure we get an empty list back on a new group, not an error.
+        user_refs = self.identity_api.list_users_in_group(new_group['id'])
+        self.assertEqual([], user_refs)
+        # Make sure we get the correct users back once they have been added
+        # to the group.
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+        user_refs = self.identity_api.list_users_in_group(new_group['id'])
+        found = False
+        for x in user_refs:
+            if x['id'] == new_user['id']:
+                found = True
+            self.assertNotIn('password', x)
+        self.assertTrue(found)
+
+    def test_list_users_in_group_404(self):
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.list_users_in_group,
+                          uuid.uuid4().hex)
+
+    def test_list_groups_for_user(self):
+        domain = self._get_domain_fixture()
+        test_groups = []
+        test_users = []
+        GROUP_COUNT = 3
+        USER_COUNT = 2
+
+        for x in range(0, USER_COUNT):
+            new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                        'enabled': True, 'domain_id': domain['id']}
+            new_user = self.identity_api.create_user(new_user)
+            test_users.append(new_user)
+        positive_user = test_users[0]
+        negative_user = test_users[1]
+
+        for x in range(0, USER_COUNT):
+            group_refs = self.identity_api.list_groups_for_user(
+                test_users[x]['id'])
+            self.assertEqual(0, len(group_refs))
+
+        for x in range(0, GROUP_COUNT):
+            before_count = x
+            after_count = x + 1
+            new_group = {'domain_id': domain['id'],
+                         'name': uuid.uuid4().hex}
+            new_group = self.identity_api.create_group(new_group)
+            test_groups.append(new_group)
+
+            # add the user to the group and ensure that the
+            # group count increases by one for each
+            group_refs = self.identity_api.list_groups_for_user(
+                positive_user['id'])
+            self.assertEqual(before_count, len(group_refs))
+            self.identity_api.add_user_to_group(
+                positive_user['id'],
+                new_group['id'])
+            group_refs = self.identity_api.list_groups_for_user(
+                positive_user['id'])
+            self.assertEqual(after_count, len(group_refs))
+
+            # Make sure the group count for the unrelated user did not change
+            group_refs = self.identity_api.list_groups_for_user(
+                negative_user['id'])
+            self.assertEqual(0, len(group_refs))
+
+        # remove the user from each group and ensure that
+        # the group count reduces by one for each
+        for x in range(0, GROUP_COUNT):
+            before_count = GROUP_COUNT - x
+            after_count = GROUP_COUNT - x - 1
+            group_refs = self.identity_api.list_groups_for_user(
+                positive_user['id'])
+            self.assertEqual(before_count, len(group_refs))
+            self.identity_api.remove_user_from_group(
+                positive_user['id'],
+                test_groups[x]['id'])
+            group_refs = self.identity_api.list_groups_for_user(
+                positive_user['id'])
+            self.assertEqual(after_count, len(group_refs))
+            # Make sure the group count for the unrelated user
+            # did not change
+            group_refs = self.identity_api.list_groups_for_user(
+                negative_user['id'])
+            self.assertEqual(0, len(group_refs))
+
+    def test_remove_user_from_group(self):
+        domain = self._get_domain_fixture()
+        new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+        groups = self.identity_api.list_groups_for_user(new_user['id'])
+        self.assertIn(new_group['id'], [x['id'] for x in groups])
+        self.identity_api.remove_user_from_group(new_user['id'],
+                                                 new_group['id'])
+        groups = self.identity_api.list_groups_for_user(new_user['id'])
+        self.assertNotIn(new_group['id'], [x['id'] for x in groups])
+
+    def test_remove_user_from_group_404(self):
+        domain = self._get_domain_fixture()
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.remove_user_from_group,
+                          new_user['id'],
+                          uuid.uuid4().hex)
+
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.remove_user_from_group,
+                          uuid.uuid4().hex,
+                          new_group['id'])
+
+        self.assertRaises(exception.NotFound,
+                          self.identity_api.remove_user_from_group,
+                          uuid.uuid4().hex,
+                          uuid.uuid4().hex)
+
+    def test_group_crud(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
+        group = self.identity_api.create_group(group)
+        group_ref = self.identity_api.get_group(group['id'])
+        self.assertDictContainsSubset(group, group_ref)
+
+        group['name'] = uuid.uuid4().hex
+        self.identity_api.update_group(group['id'], group)
+        group_ref = self.identity_api.get_group(group['id'])
+        self.assertDictContainsSubset(group, group_ref)
+
+        self.identity_api.delete_group(group['id'])
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.get_group,
+                          group['id'])
+
+    def test_get_group_by_name(self):
+        group_name = uuid.uuid4().hex
+        group = {'domain_id': DEFAULT_DOMAIN_ID, 'name': group_name}
+        group = self.identity_api.create_group(group)
+        spoiler = {'domain_id': DEFAULT_DOMAIN_ID, 'name': uuid.uuid4().hex}
+        self.identity_api.create_group(spoiler)
+
+        group_ref = self.identity_api.get_group_by_name(
+            group_name, DEFAULT_DOMAIN_ID)
+        self.assertDictEqual(group_ref, group)
+
+    def test_get_group_by_name_404(self):
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.get_group_by_name,
+                          uuid.uuid4().hex,
+                          DEFAULT_DOMAIN_ID)
+
+    @tests.skip_if_cache_disabled('identity')
+    def test_cache_layer_group_crud(self):
+        group = {'domain_id': DEFAULT_DOMAIN_ID, 'name': uuid.uuid4().hex}
+        group = self.identity_api.create_group(group)
+        # cache the result
+        group_ref = self.identity_api.get_group(group['id'])
+        # delete the group bypassing identity api.
+        domain_id, driver, entity_id = (
+            self.identity_api._get_domain_driver_and_entity_id(group['id']))
+        driver.delete_group(entity_id)
+
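+        # The manager still returns the cached entry even though the backend
+        # row is gone; only invalidating the cache makes the delete visible.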
+        self.assertEqual(group_ref, self.identity_api.get_group(group['id']))
+        self.identity_api.get_group.invalidate(self.identity_api, group['id'])
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.get_group, group['id'])
+
+        group = {'domain_id': DEFAULT_DOMAIN_ID, 'name': uuid.uuid4().hex}
+        group = self.identity_api.create_group(group)
+        # cache the result
+        self.identity_api.get_group(group['id'])
+        group['name'] = uuid.uuid4().hex
+        group_ref = self.identity_api.update_group(group['id'], group)
+        # after updating through identity api, get updated group
+        self.assertDictContainsSubset(self.identity_api.get_group(group['id']),
+                                      group_ref)
+
+    def test_create_duplicate_group_name_fails(self):
+        group1 = {'domain_id': DEFAULT_DOMAIN_ID, 'name': uuid.uuid4().hex}
+        group2 = {'domain_id': DEFAULT_DOMAIN_ID, 'name': group1['name']}
+        group1 = self.identity_api.create_group(group1)
+        self.assertRaises(exception.Conflict,
+                          self.identity_api.create_group,
+                          group2)
+
+    def test_create_duplicate_group_name_in_different_domains(self):
+        new_domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(new_domain['id'], new_domain)
+        group1 = {'domain_id': DEFAULT_DOMAIN_ID, 'name': uuid.uuid4().hex}
+        group2 = {'domain_id': new_domain['id'], 'name': group1['name']}
+        group1 = self.identity_api.create_group(group1)
+        group2 = self.identity_api.create_group(group2)
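+        # If both creates succeeded, the test passes: group names only need
+        # to be unique within a single domain.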
+
+    def test_move_group_between_domains(self):
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        group = {'name': uuid.uuid4().hex,
+                 'domain_id': domain1['id']}
+        group = self.identity_api.create_group(group)
+        group['domain_id'] = domain2['id']
+        self.identity_api.update_group(group['id'], group)
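+        # If the update didn't raise an exception, moving the group between
+        # domains succeeded.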
+
+    def test_move_group_between_domains_with_clashing_names_fails(self):
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        # First, create a group in domain1
+        group1 = {'name': uuid.uuid4().hex,
+                  'domain_id': domain1['id']}
+        group1 = self.identity_api.create_group(group1)
+        # Now create a group in domain2 with a potentially clashing
+        # name - which should work since we have domain separation
+        group2 = {'name': group1['name'],
+                  'domain_id': domain2['id']}
+        group2 = self.identity_api.create_group(group2)
+        # Now try to move group1 into the second domain - which should
+        # fail since the names clash
+        group1['domain_id'] = domain2['id']
+        self.assertRaises(exception.Conflict,
+                          self.identity_api.update_group,
+                          group1['id'],
+                          group1)
+
+    @tests.skip_if_no_multiple_domains_support
+    def test_project_crud(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        self.resource_api.create_domain(domain['id'], domain)
+        project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                   'domain_id': domain['id']}
+        self.resource_api.create_project(project['id'], project)
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertDictContainsSubset(project, project_ref)
+
+        project['name'] = uuid.uuid4().hex
+        self.resource_api.update_project(project['id'], project)
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertDictContainsSubset(project, project_ref)
+
+        self.resource_api.delete_project(project['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          project['id'])
+
+    def test_domain_delete_hierarchy(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        self.resource_api.create_domain(domain['id'], domain)
+
+        # Creating a root and a leaf project inside the domain
+        projects_hierarchy = self._create_projects_hierarchy(
+            domain_id=domain['id'])
+        root_project = projects_hierarchy[0]
+        leaf_project = projects_hierarchy[1]
+
+        # Disable the domain
+        domain['enabled'] = False
+        self.resource_api.update_domain(domain['id'], domain)
+
+        # Delete the domain
+        self.resource_api.delete_domain(domain['id'])
+
+        # Make sure the domain no longer exists
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          domain['id'])
+
+        # Make sure the root project no longer exists
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          root_project['id'])
+
+        # Make sure the leaf project no longer exists
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          leaf_project['id'])
+
+    def test_hierarchical_projects_crud(self):
+        # create a hierarchy with just a root project (which is a leaf as well)
+        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=1)
+        root_project1 = projects_hierarchy[0]
+
+        # create a hierarchy with one root project and one leaf project
+        projects_hierarchy = self._create_projects_hierarchy()
+        root_project2 = projects_hierarchy[0]
+        leaf_project = projects_hierarchy[1]
+
+        # update description from leaf_project
+        leaf_project['description'] = 'new description'
+        self.resource_api.update_project(leaf_project['id'], leaf_project)
+        proj_ref = self.resource_api.get_project(leaf_project['id'])
+        self.assertDictEqual(proj_ref, leaf_project)
+
+        # updating the parent_id is not allowed
+        leaf_project['parent_id'] = root_project1['id']
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.update_project,
+                          leaf_project['id'],
+                          leaf_project)
+
+        # delete root_project1
+        self.resource_api.delete_project(root_project1['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          root_project1['id'])
+
+        # deleting root_project2 is not allowed since it is not a leaf project
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.delete_project,
+                          root_project2['id'])
+
+    def test_create_project_with_invalid_parent(self):
+        project = {'id': uuid.uuid4().hex,
+                   'name': uuid.uuid4().hex,
+                   'description': '',
+                   'domain_id': DEFAULT_DOMAIN_ID,
+                   'enabled': True,
+                   'parent_id': 'fake'}
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.create_project,
+                          project['id'],
+                          project)
+
+    def test_create_leaf_project_with_invalid_domain(self):
+        root_project = {'id': uuid.uuid4().hex,
+                        'name': uuid.uuid4().hex,
+                        'description': '',
+                        'domain_id': DEFAULT_DOMAIN_ID,
+                        'enabled': True,
+                        'parent_id': None}
+        self.resource_api.create_project(root_project['id'], root_project)
+
+        leaf_project = {'id': uuid.uuid4().hex,
+                        'name': uuid.uuid4().hex,
+                        'description': '',
+                        'domain_id': 'fake',
+                        'enabled': True,
+                        'parent_id': root_project['id']}
+
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.create_project,
+                          leaf_project['id'],
+                          leaf_project)
+
+    def test_delete_hierarchical_leaf_project(self):
+        projects_hierarchy = self._create_projects_hierarchy()
+        root_project = projects_hierarchy[0]
+        leaf_project = projects_hierarchy[1]
+
+        self.resource_api.delete_project(leaf_project['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          leaf_project['id'])
+
+        self.resource_api.delete_project(root_project['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          root_project['id'])
+
+    def test_delete_hierarchical_not_leaf_project(self):
+        projects_hierarchy = self._create_projects_hierarchy()
+        root_project = projects_hierarchy[0]
+
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.delete_project,
+                          root_project['id'])
+
+    def test_update_project_parent(self):
+        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+        project1 = projects_hierarchy[0]
+        project2 = projects_hierarchy[1]
+        project3 = projects_hierarchy[2]
+
+        # project2 is the parent of project3
+        self.assertEqual(project2['id'], project3.get('parent_id'))
+
+        # try to update project3's parent to project1
+        project3['parent_id'] = project1['id']
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.update_project,
+                          project3['id'],
+                          project3)
+
+    def test_create_project_under_disabled_one(self):
+        project1 = {'id': uuid.uuid4().hex,
+                    'name': uuid.uuid4().hex,
+                    'domain_id': DEFAULT_DOMAIN_ID,
+                    'enabled': False,
+                    'parent_id': None}
+        self.resource_api.create_project(project1['id'], project1)
+
+        project2 = {'id': uuid.uuid4().hex,
+                    'name': uuid.uuid4().hex,
+                    'domain_id': DEFAULT_DOMAIN_ID,
+                    'parent_id': project1['id']}
+
+        # It's not possible to create a project under a disabled one in the
+        # hierarchy
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.create_project,
+                          project2['id'],
+                          project2)
+
+    def test_disable_hierarchical_leaf_project(self):
+        projects_hierarchy = self._create_projects_hierarchy()
+        leaf_project = projects_hierarchy[1]
+
+        leaf_project['enabled'] = False
+        self.resource_api.update_project(leaf_project['id'], leaf_project)
+
+        project_ref = self.resource_api.get_project(leaf_project['id'])
+        self.assertEqual(project_ref['enabled'], leaf_project['enabled'])
+
+    def test_disable_hierarchical_not_leaf_project(self):
+        projects_hierarchy = self._create_projects_hierarchy()
+        root_project = projects_hierarchy[0]
+
+        root_project['enabled'] = False
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.update_project,
+                          root_project['id'],
+                          root_project)
+
+    def test_enable_project_with_disabled_parent(self):
+        projects_hierarchy = self._create_projects_hierarchy()
+        root_project = projects_hierarchy[0]
+        leaf_project = projects_hierarchy[1]
+
+        # Disable leaf and root
+        leaf_project['enabled'] = False
+        self.resource_api.update_project(leaf_project['id'], leaf_project)
+        root_project['enabled'] = False
+        self.resource_api.update_project(root_project['id'], root_project)
+
+        # Try to enable the leaf project; this is not allowed since its
+        # parent is disabled
+        leaf_project['enabled'] = True
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.update_project,
+                          leaf_project['id'],
+                          leaf_project)
+
+    def _get_hierarchy_depth(self, project_id):
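+        # The depth of a project is the number of its parents plus one
+        # (the project itself).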
+        return len(self.resource_api.list_project_parents(project_id)) + 1
+
+    def test_check_hierarchy_depth(self):
+        # First create a hierarchy with the max allowed depth
+        projects_hierarchy = self._create_projects_hierarchy(
+            CONF.max_project_tree_depth)
+        leaf_project = projects_hierarchy[CONF.max_project_tree_depth - 1]
+
+        depth = self._get_hierarchy_depth(leaf_project['id'])
+        self.assertEqual(CONF.max_project_tree_depth, depth)
+
+        # Creating another project in the hierarchy shouldn't be allowed
+        project_id = uuid.uuid4().hex
+        project = {
+            'id': project_id,
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'parent_id': leaf_project['id']}
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.create_project,
+                          project_id,
+                          project)
+
+    def test_project_update_missing_attrs_with_a_value(self):
+        # Creating a project with no description attribute.
+        project = {'id': uuid.uuid4().hex,
+                   'name': uuid.uuid4().hex,
+                   'domain_id': DEFAULT_DOMAIN_ID,
+                   'enabled': True,
+                   'parent_id': None}
+        self.resource_api.create_project(project['id'], project)
+
+        # Add a description attribute.
+        project['description'] = uuid.uuid4().hex
+        self.resource_api.update_project(project['id'], project)
+
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertDictEqual(project_ref, project)
+
+    def test_project_update_missing_attrs_with_a_falsey_value(self):
+        # Creating a project with no description attribute.
+        project = {'id': uuid.uuid4().hex,
+                   'name': uuid.uuid4().hex,
+                   'domain_id': DEFAULT_DOMAIN_ID,
+                   'enabled': True,
+                   'parent_id': None}
+        self.resource_api.create_project(project['id'], project)
+
+        # Add a description attribute.
+        project['description'] = ''
+        self.resource_api.update_project(project['id'], project)
+
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertDictEqual(project_ref, project)
+
+    def test_domain_crud(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        self.resource_api.create_domain(domain['id'], domain)
+        domain_ref = self.resource_api.get_domain(domain['id'])
+        self.assertDictEqual(domain_ref, domain)
+
+        domain['name'] = uuid.uuid4().hex
+        self.resource_api.update_domain(domain['id'], domain)
+        domain_ref = self.resource_api.get_domain(domain['id'])
+        self.assertDictEqual(domain_ref, domain)
+
+        # Ensure an 'enabled' domain cannot be deleted
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.delete_domain,
+                          domain_id=domain['id'])
+
+        # Disable the domain
+        domain['enabled'] = False
+        self.resource_api.update_domain(domain['id'], domain)
+
+        # Delete the domain
+        self.resource_api.delete_domain(domain['id'])
+
+        # Make sure the domain no longer exists
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          domain['id'])
+
+    @tests.skip_if_no_multiple_domains_support
+    def test_create_domain_case_sensitivity(self):
+        # create a ref with a lowercase name
+        ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex.lower()}
+        self.resource_api.create_domain(ref['id'], ref)
+
+        # assign a new ID with the same name, but this time in uppercase
+        ref['id'] = uuid.uuid4().hex
+        ref['name'] = ref['name'].upper()
+        self.resource_api.create_domain(ref['id'], ref)
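+        # If the second create didn't raise an exception, domain names are
+        # treated case-sensitively.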
+
+    def test_attribute_update(self):
+        project = {
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex}
+        self.resource_api.create_project(project['id'], project)
+
+        # pick a key known to be non-existent
+        key = 'description'
+
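+        # assert_key_equals indexes the ref directly, so the key must be
+        # present with the given value; assert_get_key_is uses .get() with
+        # an identity check, so it also passes if a backend drops a
+        # null-valued key entirely.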
+        def assert_key_equals(value):
+            project_ref = self.resource_api.update_project(
+                project['id'], project)
+            self.assertEqual(value, project_ref[key])
+            project_ref = self.resource_api.get_project(project['id'])
+            self.assertEqual(value, project_ref[key])
+
+        def assert_get_key_is(value):
+            project_ref = self.resource_api.update_project(
+                project['id'], project)
+            self.assertIs(project_ref.get(key), value)
+            project_ref = self.resource_api.get_project(project['id'])
+            self.assertIs(project_ref.get(key), value)
+
+        # add an attribute that doesn't exist, set it to a falsey value
+        value = ''
+        project[key] = value
+        assert_key_equals(value)
+
+        # set an attribute with a falsey value to null
+        value = None
+        project[key] = value
+        assert_get_key_is(value)
+
+        # do it again, in case updating from this situation is handled oddly
+        value = None
+        project[key] = value
+        assert_get_key_is(value)
+
+        # set a possibly-null value to a falsey value
+        value = ''
+        project[key] = value
+        assert_key_equals(value)
+
+        # set a falsey value to a truthy value
+        value = uuid.uuid4().hex
+        project[key] = value
+        assert_key_equals(value)
+
+    def test_user_crud(self):
+        user_dict = {'domain_id': DEFAULT_DOMAIN_ID,
+                     'name': uuid.uuid4().hex, 'password': 'passw0rd'}
+        user = self.identity_api.create_user(user_dict)
+        user_ref = self.identity_api.get_user(user['id'])
+        del user_dict['password']
+        user_ref_dict = {x: user_ref[x] for x in user_ref}
+        self.assertDictContainsSubset(user_dict, user_ref_dict)
+
+        user_dict['password'] = uuid.uuid4().hex
+        self.identity_api.update_user(user['id'], user_dict)
+        user_ref = self.identity_api.get_user(user['id'])
+        del user_dict['password']
+        user_ref_dict = {x: user_ref[x] for x in user_ref}
+        self.assertDictContainsSubset(user_dict, user_ref_dict)
+
+        self.identity_api.delete_user(user['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          user['id'])
+
+    def test_list_projects_for_user(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                 'domain_id': domain['id'], 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertEqual(0, len(user_projects))
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=self.tenant_baz['id'],
+                                         role_id=self.role_member['id'])
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertEqual(2, len(user_projects))
+
+    def test_list_projects_for_user_with_grants(self):
+        # Create two groups each with a role on a different project, and
+        # make user1 a member of both groups.  Both these new projects
+        # should now be included, along with any direct user grants.
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                 'domain_id': domain['id'], 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group1 = self.identity_api.create_group(group1)
+        group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group2 = self.identity_api.create_group(group2)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project2['id'], project2)
+        self.identity_api.add_user_to_group(user1['id'], group1['id'])
+        self.identity_api.add_user_to_group(user1['id'], group2['id'])
+
+        # Create 3 grants, one user grant, the other two as group grants
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=project1['id'],
+                                         role_id=self.role_admin['id'])
+        self.assignment_api.create_grant(group_id=group2['id'],
+                                         project_id=project2['id'],
+                                         role_id=self.role_admin['id'])
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertEqual(3, len(user_projects))
+
+    @tests.skip_if_cache_disabled('resource')
+    @tests.skip_if_no_multiple_domains_support
+    def test_domain_rename_invalidates_get_domain_by_name_cache(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        domain_id = domain['id']
+        domain_name = domain['name']
+        self.resource_api.create_domain(domain_id, domain)
+        domain_ref = self.resource_api.get_domain_by_name(domain_name)
+        domain_ref['name'] = uuid.uuid4().hex
+        self.resource_api.update_domain(domain_id, domain_ref)
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain_by_name,
+                          domain_name)
+
+    @tests.skip_if_cache_disabled('resource')
+    def test_cache_layer_domain_crud(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        domain_id = domain['id']
+        # Create Domain
+        self.resource_api.create_domain(domain_id, domain)
+        domain_ref = self.resource_api.get_domain(domain_id)
+        updated_domain_ref = copy.deepcopy(domain_ref)
+        updated_domain_ref['name'] = uuid.uuid4().hex
+        # Update domain, bypassing resource api manager
+        self.resource_api.driver.update_domain(domain_id, updated_domain_ref)
+        # Verify get_domain still returns the domain
+        self.assertDictContainsSubset(
+            domain_ref, self.resource_api.get_domain(domain_id))
+        # Invalidate cache
+        self.resource_api.get_domain.invalidate(self.resource_api,
+                                                domain_id)
+        # Verify get_domain returns the updated domain
+        self.assertDictContainsSubset(
+            updated_domain_ref, self.resource_api.get_domain(domain_id))
+        # Update the domain back to original ref, using the resource api
+        # manager
+        self.resource_api.update_domain(domain_id, domain_ref)
+        self.assertDictContainsSubset(
+            domain_ref, self.resource_api.get_domain(domain_id))
+        # Make sure domain is 'disabled', bypassing the resource api manager
+        domain_ref_disabled = domain_ref.copy()
+        domain_ref_disabled['enabled'] = False
+        self.resource_api.driver.update_domain(domain_id,
+                                               domain_ref_disabled)
+        # Delete domain, bypassing resource api manager
+        self.resource_api.driver.delete_domain(domain_id)
+        # Verify get_domain still returns the domain
+        self.assertDictContainsSubset(
+            domain_ref, self.resource_api.get_domain(domain_id))
+        # Invalidate cache
+        self.resource_api.get_domain.invalidate(self.resource_api,
+                                                domain_id)
+        # Verify get_domain now raises DomainNotFound
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain, domain_id)
+        # Recreate Domain
+        self.resource_api.create_domain(domain_id, domain)
+        self.resource_api.get_domain(domain_id)
+        # Make sure domain is 'disabled', bypassing the resource api manager
+        domain['enabled'] = False
+        self.resource_api.driver.update_domain(domain_id, domain)
+        # Delete domain
+        self.resource_api.delete_domain(domain_id)
+        # verify DomainNotFound raised
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          domain_id)
+
+    @tests.skip_if_cache_disabled('resource')
+    @tests.skip_if_no_multiple_domains_support
+    def test_project_rename_invalidates_get_project_by_name_cache(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                   'domain_id': domain['id']}
+        project_id = project['id']
+        project_name = project['name']
+        self.resource_api.create_domain(domain['id'], domain)
+        # Create a project
+        self.resource_api.create_project(project_id, project)
+        self.resource_api.get_project_by_name(project_name, domain['id'])
+        project['name'] = uuid.uuid4().hex
+        self.resource_api.update_project(project_id, project)
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project_by_name,
+                          project_name,
+                          domain['id'])
+
+    @tests.skip_if_cache_disabled('resource')
+    @tests.skip_if_no_multiple_domains_support
+    def test_cache_layer_project_crud(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                   'domain_id': domain['id']}
+        project_id = project['id']
+        self.resource_api.create_domain(domain['id'], domain)
+        # Create a project
+        self.resource_api.create_project(project_id, project)
+        self.resource_api.get_project(project_id)
+        updated_project = copy.deepcopy(project)
+        updated_project['name'] = uuid.uuid4().hex
+        # Update project, bypassing resource manager
+        self.resource_api.driver.update_project(project_id,
+                                                updated_project)
+        # Verify get_project still returns the original project_ref
+        self.assertDictContainsSubset(
+            project, self.resource_api.get_project(project_id))
+        # Invalidate cache
+        self.resource_api.get_project.invalidate(self.resource_api,
+                                                 project_id)
+        # Verify get_project now returns the new project
+        self.assertDictContainsSubset(
+            updated_project,
+            self.resource_api.get_project(project_id))
+        # Update project using the resource_api manager back to original
+        self.resource_api.update_project(project['id'], project)
+        # Verify get_project returns the original project_ref
+        self.assertDictContainsSubset(
+            project, self.resource_api.get_project(project_id))
+        # Delete project, bypassing the resource api manager
+        self.resource_api.driver.delete_project(project_id)
+        # Verify get_project still returns the project_ref
+        self.assertDictContainsSubset(
+            project, self.resource_api.get_project(project_id))
+        # Invalidate cache
+        self.resource_api.get_project.invalidate(self.resource_api,
+                                                 project_id)
+        # Verify ProjectNotFound now raised
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          project_id)
+        # recreate project
+        self.resource_api.create_project(project_id, project)
+        self.resource_api.get_project(project_id)
+        # delete project
+        self.resource_api.delete_project(project_id)
+        # Verify ProjectNotFound is raised
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          project_id)
+
+    def create_user_dict(self, **attributes):
+        user_dict = {'name': uuid.uuid4().hex,
+                     'domain_id': DEFAULT_DOMAIN_ID,
+                     'enabled': True}
+        user_dict.update(attributes)
+        return user_dict
+
+    def test_arbitrary_attributes_are_returned_from_create_user(self):
+        attr_value = uuid.uuid4().hex
+        user_data = self.create_user_dict(arbitrary_attr=attr_value)
+
+        user = self.identity_api.create_user(user_data)
+
+        self.assertEqual(attr_value, user['arbitrary_attr'])
+
+    def test_arbitrary_attributes_are_returned_from_get_user(self):
+        attr_value = uuid.uuid4().hex
+        user_data = self.create_user_dict(arbitrary_attr=attr_value)
+
+        user_data = self.identity_api.create_user(user_data)
+
+        user = self.identity_api.get_user(user_data['id'])
+        self.assertEqual(attr_value, user['arbitrary_attr'])
+
+    def test_new_arbitrary_attributes_are_returned_from_update_user(self):
+        user_data = self.create_user_dict()
+
+        user = self.identity_api.create_user(user_data)
+        attr_value = uuid.uuid4().hex
+        user['arbitrary_attr'] = attr_value
+        updated_user = self.identity_api.update_user(user['id'], user)
+
+        self.assertEqual(attr_value, updated_user['arbitrary_attr'])
+
+    def test_updated_arbitrary_attributes_are_returned_from_update_user(self):
+        attr_value = uuid.uuid4().hex
+        user_data = self.create_user_dict(arbitrary_attr=attr_value)
+
+        new_attr_value = uuid.uuid4().hex
+        user = self.identity_api.create_user(user_data)
+        user['arbitrary_attr'] = new_attr_value
+        updated_user = self.identity_api.update_user(user['id'], user)
+
+        self.assertEqual(new_attr_value, updated_user['arbitrary_attr'])
+
+    def test_create_grant_no_user(self):
+        # Calling create_grant with a user that doesn't exist does not fail.
+        self.assignment_api.create_grant(
+            self.role_other['id'],
+            user_id=uuid.uuid4().hex,
+            project_id=self.tenant_bar['id'])
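+        # The assignment backend does not validate the user against the
+        # identity backend, so the grant is simply accepted.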
+
+    def test_create_grant_no_group(self):
+        # Calling create_grant with a group that doesn't exist does not fail.
+        self.assignment_api.create_grant(
+            self.role_other['id'],
+            group_id=uuid.uuid4().hex,
+            project_id=self.tenant_bar['id'])
+
+    @tests.skip_if_no_multiple_domains_support
+    def test_get_default_domain_by_name(self):
+        domain_name = 'default'
+
+        domain = {'id': uuid.uuid4().hex, 'name': domain_name, 'enabled': True}
+        self.resource_api.create_domain(domain['id'], domain)
+
+        domain_ref = self.resource_api.get_domain_by_name(domain_name)
+        self.assertEqual(domain, domain_ref)
+
+    def test_get_not_default_domain_by_name(self):
+        domain_name = 'foo'
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain_by_name,
+                          domain_name)
+
+    def test_project_update_and_project_get_return_same_response(self):
+        project = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'domain_id': CONF.identity.default_domain_id,
+            'description': uuid.uuid4().hex,
+            'enabled': True}
+
+        self.resource_api.create_project(project['id'], project)
+
+        updated_project = {'enabled': False}
+        updated_project_ref = self.resource_api.update_project(
+            project['id'], updated_project)
+
+        # SQL backend adds 'extra' field
+        updated_project_ref.pop('extra', None)
+
+        self.assertIs(False, updated_project_ref['enabled'])
+
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertDictEqual(project_ref, updated_project_ref)
+
+    def test_user_update_and_user_get_return_same_response(self):
+        user = {
+            'name': uuid.uuid4().hex,
+            'domain_id': CONF.identity.default_domain_id,
+            'description': uuid.uuid4().hex,
+            'enabled': True}
+
+        user = self.identity_api.create_user(user)
+
+        updated_user = {'enabled': False}
+        updated_user_ref = self.identity_api.update_user(
+            user['id'], updated_user)
+
+        # SQL backend adds 'extra' field
+        updated_user_ref.pop('extra', None)
+
+        self.assertIs(False, updated_user_ref['enabled'])
+
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertDictEqual(user_ref, updated_user_ref)
+
+    def test_delete_group_removes_role_assignments(self):
+        # When a group is deleted any role assignments for the group are
+        # removed.
+
+        MEMBER_ROLE_ID = 'member'
+
+        def get_member_assignments():
+            assignments = self.assignment_api.list_role_assignments()
+            return [x for x in assignments
+                    if x['role_id'] == MEMBER_ROLE_ID]
+
+        orig_member_assignments = get_member_assignments()
+
+        # Create a group.
+        new_group = {
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'name': self.getUniqueString(prefix='tdgrra')}
+        new_group = self.identity_api.create_group(new_group)
+
+        # Create a project.
+        new_project = {
+            'id': uuid.uuid4().hex,
+            'name': self.getUniqueString(prefix='tdgrra'),
+            'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(new_project['id'], new_project)
+
+        # Assign a role to the group.
+        self.assignment_api.create_grant(
+            group_id=new_group['id'], project_id=new_project['id'],
+            role_id=MEMBER_ROLE_ID)
+
+        # Delete the group.
+        self.identity_api.delete_group(new_group['id'])
+
+        # Check that the role assignment for the group is gone
+        member_assignments = get_member_assignments()
+
+        self.assertThat(member_assignments,
+                        matchers.Equals(orig_member_assignments))
+
+    def test_get_roles_for_groups_on_domain(self):
+        """Test retrieving group domain roles.
+
+        Test Plan:
+
+        - Create a domain, three groups and three roles
+        - Assign one an inherited and the others a non-inherited group role
+          to the domain
+        - Ensure that only the non-inherited roles are returned on the domain
+
+        """
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        group_list = []
+        group_id_list = []
+        role_list = []
+        for _ in range(3):
+            group = {'name': uuid.uuid4().hex, 'domain_id': domain1['id']}
+            group = self.identity_api.create_group(group)
+            group_list.append(group)
+            group_id_list.append(group['id'])
+
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        # Assign the roles - one is inherited
+        self.assignment_api.create_grant(group_id=group_list[0]['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[0]['id'])
+        self.assignment_api.create_grant(group_id=group_list[1]['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[1]['id'])
+        self.assignment_api.create_grant(group_id=group_list[2]['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[2]['id'],
+                                         inherited_to_projects=True)
+
+        # Now get the effective roles for the groups on the domain. We
+        # shouldn't get back the inherited role.
+
+        role_refs = self.assignment_api.get_roles_for_groups(
+            group_id_list, domain_id=domain1['id'])
+
+        self.assertThat(role_refs, matchers.HasLength(2))
+        self.assertIn(role_list[0], role_refs)
+        self.assertIn(role_list[1], role_refs)
+
+    def test_get_roles_for_groups_on_project(self):
+        """Test retrieving group project roles.
+
+        Test Plan:
+
+        - Create two domains, two projects, six groups and six roles
+        - Project1 is in Domain1, Project2 is in Domain2
+        - Domain2/Project2 are spoilers
+        - Assign a different direct group role to each project as well
+          as both an inherited and non-inherited role to each domain
+        - Get the group roles for Project 1 - depending on whether we have
+          enabled inheritance, we should either get back just the direct role
+          or both the direct one plus the inherited domain role from Domain 1
+
+        """
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain2['id']}
+        self.resource_api.create_project(project2['id'], project2)
+        group_list = []
+        group_id_list = []
+        role_list = []
+        for _ in range(6):
+            group = {'name': uuid.uuid4().hex, 'domain_id': domain1['id']}
+            group = self.identity_api.create_group(group)
+            group_list.append(group)
+            group_id_list.append(group['id'])
+
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        # Assign the roles - one inherited and one non-inherited on Domain1,
+        # plus one on Project1
+        self.assignment_api.create_grant(group_id=group_list[0]['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[0]['id'])
+        self.assignment_api.create_grant(group_id=group_list[1]['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[1]['id'],
+                                         inherited_to_projects=True)
+        self.assignment_api.create_grant(group_id=group_list[2]['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[2]['id'])
+
+        # ...and a duplicate set of spoiler assignments to Domain2/Project2
+        self.assignment_api.create_grant(group_id=group_list[3]['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=role_list[3]['id'])
+        self.assignment_api.create_grant(group_id=group_list[4]['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=role_list[4]['id'],
+                                         inherited_to_projects=True)
+        self.assignment_api.create_grant(group_id=group_list[5]['id'],
+                                         project_id=project2['id'],
+                                         role_id=role_list[5]['id'])
+
+        # Now get the effective roles for all groups on Project1. With
+        # inheritance off, we should only get back the direct role.
+
+        self.config_fixture.config(group='os_inherit', enabled=False)
+        role_refs = self.assignment_api.get_roles_for_groups(
+            group_id_list, project_id=project1['id'])
+
+        self.assertThat(role_refs, matchers.HasLength(1))
+        self.assertIn(role_list[2], role_refs)
+
+        # With inheritance on, we should also get back the inherited role from
+        # its owning domain.
+
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        role_refs = self.assignment_api.get_roles_for_groups(
+            group_id_list, project_id=project1['id'])
+
+        self.assertThat(role_refs, matchers.HasLength(2))
+        self.assertIn(role_list[1], role_refs)
+        self.assertIn(role_list[2], role_refs)
+
+    def test_list_domains_for_groups(self):
+        """Test retrieving domains for a list of groups.
+
+        Test Plan:
+
+        - Create three domains, three groups and one role
+        - Assign a non-inherited group role to two domains, and an inherited
+          group role to the third
+        - Ensure only the domains with non-inherited roles are returned
+
+        """
+        domain_list = []
+        group_list = []
+        group_id_list = []
+        for _ in range(3):
+            domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.resource_api.create_domain(domain['id'], domain)
+            domain_list.append(domain)
+
+            group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+            group = self.identity_api.create_group(group)
+            group_list.append(group)
+            group_id_list.append(group['id'])
+
+        role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role1['id'], role1)
+
+        # Assign the roles - one is inherited
+        self.assignment_api.create_grant(group_id=group_list[0]['id'],
+                                         domain_id=domain_list[0]['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(group_id=group_list[1]['id'],
+                                         domain_id=domain_list[1]['id'],
+                                         role_id=role1['id'])
+        self.assignment_api.create_grant(group_id=group_list[2]['id'],
+                                         domain_id=domain_list[2]['id'],
+                                         role_id=role1['id'],
+                                         inherited_to_projects=True)
+
+        # Now list the domains that have roles for any of the 3 groups
+        # We shouldn't get back domain_list[2] since that had an inherited role.
+
+        domain_refs = (
+            self.assignment_api.list_domains_for_groups(group_id_list))
+
+        self.assertThat(domain_refs, matchers.HasLength(2))
+        self.assertIn(domain_list[0], domain_refs)
+        self.assertIn(domain_list[1], domain_refs)
+
+    def test_list_projects_for_groups(self):
+        """Test retrieving projects for a list of groups.
+
+        Test Plan:
+
+        - Create two domains, four projects, seven groups and seven roles
+        - Project1-3 are in Domain1, Project4 is in Domain2
+        - Domain2/Project4 are spoilers
+        - Project1 and 2 have direct group roles, Project3 has no direct
+          roles but should inherit a group role from Domain1
+        - Get the projects for the group roles that are assigned to Project1
+          Project2 and the inherited one on Domain1. Depending on whether we
+          have enabled inheritance, we should either get back just the projects
+          with direct roles (Project 1 and 2) or also Project3 due to its
+          inherited role from Domain1.
+
+        """
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        project1 = self.resource_api.create_project(project1['id'], project1)
+        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        project2 = self.resource_api.create_project(project2['id'], project2)
+        project3 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        project3 = self.resource_api.create_project(project3['id'], project3)
+        project4 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain2['id']}
+        project4 = self.resource_api.create_project(project4['id'], project4)
+        group_list = []
+        role_list = []
+        for _ in range(7):
+            group = {'name': uuid.uuid4().hex, 'domain_id': domain1['id']}
+            group = self.identity_api.create_group(group)
+            group_list.append(group)
+
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        # Assign the roles - one inherited and one non-inherited on Domain1,
+        # plus one on Project1 and Project2
+        self.assignment_api.create_grant(group_id=group_list[0]['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[0]['id'])
+        self.assignment_api.create_grant(group_id=group_list[1]['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[1]['id'],
+                                         inherited_to_projects=True)
+        self.assignment_api.create_grant(group_id=group_list[2]['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[2]['id'])
+        self.assignment_api.create_grant(group_id=group_list[3]['id'],
+                                         project_id=project2['id'],
+                                         role_id=role_list[3]['id'])
+
+        # ...and a few spoiler assignments to Domain2/Project4
+        self.assignment_api.create_grant(group_id=group_list[4]['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=role_list[4]['id'])
+        self.assignment_api.create_grant(group_id=group_list[5]['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=role_list[5]['id'],
+                                         inherited_to_projects=True)
+        self.assignment_api.create_grant(group_id=group_list[6]['id'],
+                                         project_id=project4['id'],
+                                         role_id=role_list[6]['id'])
+
+        # Now get the projects for the groups that have roles on Project1,
+        # Project2 and the inherited role on Domain1. With inheritance off,
+        # we should only get back the projects with direct roles.
+
+        self.config_fixture.config(group='os_inherit', enabled=False)
+        group_id_list = [group_list[1]['id'], group_list[2]['id'],
+                         group_list[3]['id']]
+        project_refs = (
+            self.assignment_api.list_projects_for_groups(group_id_list))
+
+        self.assertThat(project_refs, matchers.HasLength(2))
+        self.assertIn(project1, project_refs)
+        self.assertIn(project2, project_refs)
+
+        # With inheritance on, we should also get back Project3 due to the
+        # inherited role from its owning domain.
+
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        project_refs = (
+            self.assignment_api.list_projects_for_groups(group_id_list))
+
+        self.assertThat(project_refs, matchers.HasLength(3))
+        self.assertIn(project1, project_refs)
+        self.assertIn(project2, project_refs)
+        self.assertIn(project3, project_refs)
+
+    def test_update_role_no_name(self):
+        # A user can update a role and not include the name.
+
+        # description is picked just because it's not name.
+        self.role_api.update_role(self.role_member['id'],
+                                  {'description': uuid.uuid4().hex})
+        # If the previous line didn't raise an exception then the test passes.
+
+    def test_update_role_same_name(self):
+        # A user can update a role and set the name to be the same as it was.
+
+        self.role_api.update_role(self.role_member['id'],
+                                  {'name': self.role_member['name']})
+        # If the previous line didn't raise an exception then the test passes.
+
+
+class TokenTests(object):
+    def _create_token_id(self):
+        # Use a token signed by the cms module
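+        # Concatenating several uuids makes the id long, presumably to
+        # exercise backends that must handle large token ids.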
+        token_id = ""
+        for _ in range(1, 20):
+            token_id += uuid.uuid4().hex
+        return cms.cms_sign_token(token_id,
+                                  CONF.signing.certfile,
+                                  CONF.signing.keyfile)
+
+    def _assert_revoked_token_list_matches_token_persistence(
+            self, revoked_token_id_list):
+        # Assert that the list passed in matches the list returned by the
+        # token persistence service
+        persistence_list = [
+            x['id']
+            for x in self.token_provider_api.list_revoked_tokens()
+        ]
+        self.assertEqual(persistence_list, revoked_token_id_list)
+
+    def test_token_crud(self):
+        token_id = self._create_token_id()
+        data = {'id': token_id, 'a': 'b',
+                'trust_id': None,
+                'user': {'id': 'testuserid'}}
+        data_ref = self.token_provider_api._persistence.create_token(token_id,
+                                                                     data)
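+        # 'expires' and 'user_id' are added by the persistence layer, so
+        # strip them (and 'id') before comparing against the input data.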
+        expires = data_ref.pop('expires')
+        data_ref.pop('user_id')
+        self.assertIsInstance(expires, datetime.datetime)
+        data_ref.pop('id')
+        data.pop('id')
+        self.assertDictEqual(data_ref, data)
+
+        new_data_ref = self.token_provider_api._persistence.get_token(token_id)
+        expires = new_data_ref.pop('expires')
+        self.assertIsInstance(expires, datetime.datetime)
+        new_data_ref.pop('user_id')
+        new_data_ref.pop('id')
+
+        self.assertEqual(data, new_data_ref)
+
+        self.token_provider_api._persistence.delete_token(token_id)
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api._persistence.get_token, token_id)
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api._persistence.delete_token, token_id)
+
+    def create_token_sample_data(self, token_id=None, tenant_id=None,
+                                 trust_id=None, user_id=None, expires=None):
+        if token_id is None:
+            token_id = self._create_token_id()
+        if user_id is None:
+            user_id = 'testuserid'
+        # FIXME(morganfainberg): These tokens look nothing like "Real" tokens.
+        # This should be fixed when token issuance is cleaned up.
+        data = {'id': token_id, 'a': 'b',
+                'user': {'id': user_id}}
+        if tenant_id is not None:
+            data['tenant'] = {'id': tenant_id, 'name': tenant_id}
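+        # NULL_OBJECT is a sentinel that requests an explicitly null tenant,
+        # as opposed to omitting the tenant entirely.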
+        if tenant_id is NULL_OBJECT:
+            data['tenant'] = None
+        if expires is not None:
+            data['expires'] = expires
+        if trust_id is not None:
+            data['trust_id'] = trust_id
+            data.setdefault('access', {}).setdefault('trust', {})
+            # 'testuserid2' is used here since a trustee differs from the
+            # trustor in impersonation cases and therefore should not match
+            # the token's user_id.
+            data['access']['trust']['trustee_user_id'] = 'testuserid2'
+        data['token_version'] = provider.V2
+        # Token issuance stores a copy of all token data at
+        # token['token_data']; emulate that behavior here as part of the
+        # test.
+        data['token_data'] = copy.deepcopy(data)
+        new_token = self.token_provider_api._persistence.create_token(token_id,
+                                                                      data)
+        return new_token['id'], data
+
+    def test_delete_tokens(self):
+        tokens = self.token_provider_api._persistence._list_tokens(
+            'testuserid')
+        self.assertEqual(0, len(tokens))
+        token_id1, data = self.create_token_sample_data(
+            tenant_id='testtenantid')
+        token_id2, data = self.create_token_sample_data(
+            tenant_id='testtenantid')
+        token_id3, data = self.create_token_sample_data(
+            tenant_id='testtenantid',
+            user_id='testuserid1')
+        tokens = self.token_provider_api._persistence._list_tokens(
+            'testuserid')
+        self.assertEqual(2, len(tokens))
+        self.assertIn(token_id2, tokens)
+        self.assertIn(token_id1, tokens)
+        self.token_provider_api._persistence.delete_tokens(
+            user_id='testuserid',
+            tenant_id='testtenantid')
+        tokens = self.token_provider_api._persistence._list_tokens(
+            'testuserid')
+        self.assertEqual(0, len(tokens))
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._persistence.get_token,
+                          token_id1)
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._persistence.get_token,
+                          token_id2)
+
+        self.token_provider_api._persistence.get_token(token_id3)
+
+    def test_delete_tokens_trust(self):
+        tokens = self.token_provider_api._persistence._list_tokens(
+            user_id='testuserid')
+        self.assertEqual(0, len(tokens))
+        token_id1, data = self.create_token_sample_data(
+            tenant_id='testtenantid',
+            trust_id='testtrustid')
+        token_id2, data = self.create_token_sample_data(
+            tenant_id='testtenantid',
+            user_id='testuserid1',
+            trust_id='testtrustid1')
+        tokens = self.token_provider_api._persistence._list_tokens(
+            'testuserid')
+        self.assertEqual(1, len(tokens))
+        self.assertIn(token_id1, tokens)
+        self.token_provider_api._persistence.delete_tokens(
+            user_id='testuserid',
+            tenant_id='testtenantid',
+            trust_id='testtrustid')
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._persistence.get_token,
+                          token_id1)
+        self.token_provider_api._persistence.get_token(token_id2)
+
+    def _test_token_list(self, token_list_fn):
+        tokens = token_list_fn('testuserid')
+        self.assertEqual(0, len(tokens))
+        token_id1, data = self.create_token_sample_data()
+        tokens = token_list_fn('testuserid')
+        self.assertEqual(1, len(tokens))
+        self.assertIn(token_id1, tokens)
+        token_id2, data = self.create_token_sample_data()
+        tokens = token_list_fn('testuserid')
+        self.assertEqual(2, len(tokens))
+        self.assertIn(token_id2, tokens)
+        self.assertIn(token_id1, tokens)
+        self.token_provider_api._persistence.delete_token(token_id1)
+        tokens = token_list_fn('testuserid')
+        self.assertIn(token_id2, tokens)
+        self.assertNotIn(token_id1, tokens)
+        self.token_provider_api._persistence.delete_token(token_id2)
+        tokens = token_list_fn('testuserid')
+        self.assertNotIn(token_id2, tokens)
+        self.assertNotIn(token_id1, tokens)
+
+        # tenant-specific tokens
+        tenant1 = uuid.uuid4().hex
+        tenant2 = uuid.uuid4().hex
+        token_id3, data = self.create_token_sample_data(tenant_id=tenant1)
+        token_id4, data = self.create_token_sample_data(tenant_id=tenant2)
+        # test for existing but empty tenant (LP:1078497)
+        token_id5, data = self.create_token_sample_data(tenant_id=NULL_OBJECT)
+        tokens = token_list_fn('testuserid')
+        self.assertEqual(3, len(tokens))
+        self.assertNotIn(token_id1, tokens)
+        self.assertNotIn(token_id2, tokens)
+        self.assertIn(token_id3, tokens)
+        self.assertIn(token_id4, tokens)
+        self.assertIn(token_id5, tokens)
+        tokens = token_list_fn('testuserid', tenant2)
+        self.assertEqual(1, len(tokens))
+        self.assertNotIn(token_id1, tokens)
+        self.assertNotIn(token_id2, tokens)
+        self.assertNotIn(token_id3, tokens)
+        self.assertIn(token_id4, tokens)
+
+    def test_token_list(self):
+        self._test_token_list(
+            self.token_provider_api._persistence._list_tokens)
+
+    def test_token_list_trust(self):
+        trust_id = uuid.uuid4().hex
+        token_id5, data = self.create_token_sample_data(trust_id=trust_id)
+        tokens = self.token_provider_api._persistence._list_tokens(
+            'testuserid', trust_id=trust_id)
+        self.assertEqual(1, len(tokens))
+        self.assertIn(token_id5, tokens)
+
+    def test_get_token_404(self):
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._persistence.get_token,
+                          uuid.uuid4().hex)
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._persistence.get_token,
+                          None)
+
+    def test_delete_token_404(self):
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._persistence.delete_token,
+                          uuid.uuid4().hex)
+
+    def test_expired_token(self):
+        token_id = uuid.uuid4().hex
+        expire_time = timeutils.utcnow() - datetime.timedelta(minutes=1)
+        data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+                'expires': expire_time,
+                'trust_id': None,
+                'user': {'id': 'testuserid'}}
+        data_ref = self.token_provider_api._persistence.create_token(token_id,
+                                                                     data)
+        data_ref.pop('user_id')
+        self.assertDictEqual(data_ref, data)
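+        # An already-expired token must behave as if it does not exist.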
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._persistence.get_token,
+                          token_id)
+
+    def test_null_expires_token(self):
+        token_id = uuid.uuid4().hex
+        data = {'id': token_id, 'id_hash': token_id, 'a': 'b', 'expires': None,
+                'user': {'id': 'testuserid'}}
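+        # The backend is expected to substitute a default expiry when
+        # 'expires' is None.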
+        data_ref = self.token_provider_api._persistence.create_token(token_id,
+                                                                     data)
+        self.assertIsNotNone(data_ref['expires'])
+        new_data_ref = self.token_provider_api._persistence.get_token(token_id)
+
+        # MySQL doesn't store microseconds, so discard them before testing
+        data_ref['expires'] = data_ref['expires'].replace(microsecond=0)
+        new_data_ref['expires'] = new_data_ref['expires'].replace(
+            microsecond=0)
+
+        self.assertEqual(data_ref, new_data_ref)
+
+    def check_list_revoked_tokens(self, token_ids):
+        revoked_ids = [x['id']
+                       for x in self.token_provider_api.list_revoked_tokens()]
+        self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+        for token_id in token_ids:
+            self.assertIn(token_id, revoked_ids)
+
+    def delete_token(self):
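+        # Helper (not a test itself): create and delete a token, returning
+        # its id for the revocation-list tests below.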
+        token_id = uuid.uuid4().hex
+        data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+                'user': {'id': 'testuserid'}}
+        data_ref = self.token_provider_api._persistence.create_token(token_id,
+                                                                     data)
+        self.token_provider_api._persistence.delete_token(token_id)
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api._persistence.get_token,
+            data_ref['id'])
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api._persistence.delete_token,
+            data_ref['id'])
+        return token_id
+
+    def test_list_revoked_tokens_returns_empty_list(self):
+        revoked_ids = [x['id']
+                       for x in self.token_provider_api.list_revoked_tokens()]
+        self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+        self.assertEqual([], revoked_ids)
+
+    def test_list_revoked_tokens_for_single_token(self):
+        self.check_list_revoked_tokens([self.delete_token()])
+
+    def test_list_revoked_tokens_for_multiple_tokens(self):
+        self.check_list_revoked_tokens([self.delete_token()
+                                        for x in six.moves.range(2)])
+
+    def test_flush_expired_token(self):
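+        # Create one already-expired token and one still-valid token; the
+        # flush should remove only the expired one.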
+        token_id = uuid.uuid4().hex
+        expire_time = timeutils.utcnow() - datetime.timedelta(minutes=1)
+        data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+                'expires': expire_time,
+                'trust_id': None,
+                'user': {'id': 'testuserid'}}
+        data_ref = self.token_provider_api._persistence.create_token(token_id,
+                                                                     data)
+        data_ref.pop('user_id')
+        self.assertDictEqual(data_ref, data)
+
+        token_id = uuid.uuid4().hex
+        expire_time = timeutils.utcnow() + datetime.timedelta(minutes=1)
+        data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+                'expires': expire_time,
+                'trust_id': None,
+                'user': {'id': 'testuserid'}}
+        data_ref = self.token_provider_api._persistence.create_token(token_id,
+                                                                     data)
+        data_ref.pop('user_id')
+        self.assertDictEqual(data_ref, data)
+
+        self.token_provider_api._persistence.flush_expired_tokens()
+        tokens = self.token_provider_api._persistence._list_tokens(
+            'testuserid')
+        self.assertEqual(1, len(tokens))
+        self.assertIn(token_id, tokens)
+
+    @tests.skip_if_cache_disabled('token')
+    def test_revocation_list_cache(self):
+        expire_time = timeutils.utcnow() + datetime.timedelta(minutes=10)
+        token_id = uuid.uuid4().hex
+        token_data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+                      'expires': expire_time,
+                      'trust_id': None,
+                      'user': {'id': 'testuserid'}}
+        token2_id = uuid.uuid4().hex
+        token2_data = {'id_hash': token2_id, 'id': token2_id, 'a': 'b',
+                       'expires': expire_time,
+                       'trust_id': None,
+                       'user': {'id': 'testuserid'}}
+        # Create 2 Tokens.
+        self.token_provider_api._persistence.create_token(token_id,
+                                                          token_data)
+        self.token_provider_api._persistence.create_token(token2_id,
+                                                          token2_data)
+        # Verify the revocation list is empty.
+        self.assertEqual(
+            [], self.token_provider_api._persistence.list_revoked_tokens())
+        self.assertEqual([], self.token_provider_api.list_revoked_tokens())
+        # Delete a token directly, bypassing the manager.
+        self.token_provider_api._persistence.driver.delete_token(token_id)
+        # Verify the revocation list is still empty.
+        self.assertEqual(
+            [], self.token_provider_api._persistence.list_revoked_tokens())
+        self.assertEqual([], self.token_provider_api.list_revoked_tokens())
+        # Invalidate the revocation list.
+        self.token_provider_api._persistence.invalidate_revocation_list()
+        # Verify the deleted token is in the revocation list.
+        revoked_ids = [x['id']
+                       for x in self.token_provider_api.list_revoked_tokens()]
+        self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+        self.assertIn(token_id, revoked_ids)
+        # Delete the second token, through the manager
+        self.token_provider_api._persistence.delete_token(token2_id)
+        revoked_ids = [x['id']
+                       for x in self.token_provider_api.list_revoked_tokens()]
+        self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+        # Verify both tokens are in the revocation list.
+        self.assertIn(token_id, revoked_ids)
+        self.assertIn(token2_id, revoked_ids)
+
+    def _test_predictable_revoked_pki_token_id(self, hash_fn):
+        token_id = self._create_token_id()
+        token_id_hash = hash_fn(token_id).hexdigest()
+        token = {'user': {'id': uuid.uuid4().hex}}
+
+        self.token_provider_api._persistence.create_token(token_id, token)
+        self.token_provider_api._persistence.delete_token(token_id)
+
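+        # For PKI tokens, the revocation list stores the hash of the token
+        # id rather than the raw id.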
+        revoked_ids = [x['id']
+                       for x in self.token_provider_api.list_revoked_tokens()]
+        self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+        self.assertIn(token_id_hash, revoked_ids)
+        self.assertNotIn(token_id, revoked_ids)
+        for t in self.token_provider_api._persistence.list_revoked_tokens():
+            self.assertIn('expires', t)
+
+    def test_predictable_revoked_pki_token_id_default(self):
+        self._test_predictable_revoked_pki_token_id(hashlib.md5)
+
+    def test_predictable_revoked_pki_token_id_sha256(self):
+        self.config_fixture.config(group='token', hash_algorithm='sha256')
+        self._test_predictable_revoked_pki_token_id(hashlib.sha256)
+
+    def test_predictable_revoked_uuid_token_id(self):
+        token_id = uuid.uuid4().hex
+        token = {'user': {'id': uuid.uuid4().hex}}
+
+        self.token_provider_api._persistence.create_token(token_id, token)
+        self.token_provider_api._persistence.delete_token(token_id)
+
+        revoked_tokens = self.token_provider_api.list_revoked_tokens()
+        revoked_ids = [x['id'] for x in revoked_tokens]
+        self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+        self.assertIn(token_id, revoked_ids)
+        for t in revoked_tokens:
+            self.assertIn('expires', t)
+
+    def test_create_unicode_token_id(self):
+        token_id = six.text_type(self._create_token_id())
+        self.create_token_sample_data(token_id=token_id)
+        self.token_provider_api._persistence.get_token(token_id)
+
+    def test_create_unicode_user_id(self):
+        user_id = six.text_type(uuid.uuid4().hex)
+        token_id, data = self.create_token_sample_data(user_id=user_id)
+        self.token_provider_api._persistence.get_token(token_id)
+
+    def test_token_expire_timezone(self):
+
+        @test_utils.timezone
+        def _create_token(expire_time):
+            token_id = uuid.uuid4().hex
+            user_id = six.text_type(uuid.uuid4().hex)
+            return self.create_token_sample_data(token_id=token_id,
+                                                 user_id=user_id,
+                                                 expires=expire_time)
+
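+        # Exercise a range of UTC offsets; token expiry handling should be
+        # unaffected by the local timezone (test_utils.TZ is presumably
+        # applied by the @test_utils.timezone decorator above).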
+        for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']:
+            test_utils.TZ = 'UTC' + d
+            expire_time = timeutils.utcnow() + datetime.timedelta(minutes=1)
+            token_id, data_in = _create_token(expire_time)
+            data_get = self.token_provider_api._persistence.get_token(token_id)
+
+            self.assertEqual(data_in['id'], data_get['id'],
+                             'TZ=%s' % test_utils.TZ)
+
+            expire_time_expired = (
+                timeutils.utcnow() + datetime.timedelta(minutes=-1))
+            token_id, data_in = _create_token(expire_time_expired)
+            self.assertRaises(exception.TokenNotFound,
+                              self.token_provider_api._persistence.get_token,
+                              data_in['id'])
+
+
+class TokenCacheInvalidation(object):
+    def _create_test_data(self):
+        self.user = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                     'password': uuid.uuid4().hex,
+                     'domain_id': DEFAULT_DOMAIN_ID, 'enabled': True}
+        self.tenant = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                       'domain_id': DEFAULT_DOMAIN_ID, 'enabled': True}
+
+        # Create an equivalent of a scoped token
+        token_dict = {'user': self.user, 'tenant': self.tenant,
+                      'metadata': {}, 'id': 'placeholder'}
+        token_id, data = self.token_provider_api.issue_v2_token(token_dict)
+        self.scoped_token_id = token_id
+
+        # ...and an unscoped one
+        token_dict = {'user': self.user, 'tenant': None,
+                      'metadata': {}, 'id': 'placeholder'}
+        token_id, data = self.token_provider_api.issue_v2_token(token_dict)
+        self.unscoped_token_id = token_id
+
+        # Validate them in the various ways possible; this loads the
+        # responses into the token cache.
+        self._check_scoped_tokens_are_valid()
+        self._check_unscoped_tokens_are_valid()
+
+    def _check_unscoped_tokens_are_invalid(self):
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api.validate_token,
+            self.unscoped_token_id)
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api.validate_v2_token,
+            self.unscoped_token_id)
+
+    def _check_scoped_tokens_are_invalid(self):
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api.validate_token,
+            self.scoped_token_id)
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api.validate_token,
+            self.scoped_token_id,
+            self.tenant['id'])
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api.validate_v2_token,
+            self.scoped_token_id)
+        self.assertRaises(
+            exception.TokenNotFound,
+            self.token_provider_api.validate_v2_token,
+            self.scoped_token_id,
+            self.tenant['id'])
+
+    def _check_scoped_tokens_are_valid(self):
+        self.token_provider_api.validate_token(self.scoped_token_id)
+        self.token_provider_api.validate_token(
+            self.scoped_token_id, belongs_to=self.tenant['id'])
+        self.token_provider_api.validate_v2_token(self.scoped_token_id)
+        self.token_provider_api.validate_v2_token(
+            self.scoped_token_id, belongs_to=self.tenant['id'])
+
+    def _check_unscoped_tokens_are_valid(self):
+        self.token_provider_api.validate_token(self.unscoped_token_id)
+        self.token_provider_api.validate_v2_token(self.unscoped_token_id)
+
+    def test_delete_unscoped_token(self):
+        self.token_provider_api._persistence.delete_token(
+            self.unscoped_token_id)
+        self._check_unscoped_tokens_are_invalid()
+        self._check_scoped_tokens_are_valid()
+
+    def test_delete_scoped_token_by_id(self):
+        self.token_provider_api._persistence.delete_token(self.scoped_token_id)
+        self._check_scoped_tokens_are_invalid()
+        self._check_unscoped_tokens_are_valid()
+
+    def test_delete_scoped_token_by_user(self):
+        self.token_provider_api._persistence.delete_tokens(self.user['id'])
+        # Since we are deleting all tokens for this user, they should all
+        # now be invalid.
+        self._check_scoped_tokens_are_invalid()
+        self._check_unscoped_tokens_are_invalid()
+
+    def test_delete_scoped_token_by_user_and_tenant(self):
+        self.token_provider_api._persistence.delete_tokens(
+            self.user['id'],
+            tenant_id=self.tenant['id'])
+        self._check_scoped_tokens_are_invalid()
+        self._check_unscoped_tokens_are_valid()
+
+
+class TrustTests(object):
+    def create_sample_trust(self, new_id, remaining_uses=None):
+        self.trustor = self.user_foo
+        self.trustee = self.user_two
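+        # remaining_uses=None means the trust can be used an unlimited
+        # number of times.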
+        trust_data = self.trust_api.create_trust(
+            new_id,
+            {'trustor_user_id': self.trustor['id'],
+             'trustee_user_id': self.trustee['id'],
+             'project_id': self.tenant_bar['id'],
+             'expires_at': timeutils.parse_isotime('2031-02-18T18:10:00Z'),
+             'impersonation': True,
+             'remaining_uses': remaining_uses},
+            roles=[{"id": "member"},
+                   {"id": "other"},
+                   {"id": "browser"}])
+        return trust_data
+
+    def test_delete_trust(self):
+        new_id = uuid.uuid4().hex
+        trust_data = self.create_sample_trust(new_id)
+        trust_id = trust_data['id']
+        self.assertIsNotNone(trust_data)
+        trust_data = self.trust_api.get_trust(trust_id)
+        self.assertEqual(new_id, trust_data['id'])
+        self.trust_api.delete_trust(trust_id)
+        self.assertIsNone(self.trust_api.get_trust(trust_id))
+
+    def test_delete_trust_not_found(self):
+        trust_id = uuid.uuid4().hex
+        self.assertRaises(exception.TrustNotFound,
+                          self.trust_api.delete_trust,
+                          trust_id)
+
+    def test_get_trust(self):
+        new_id = uuid.uuid4().hex
+        trust_data = self.create_sample_trust(new_id)
+        trust_id = trust_data['id']
+        self.assertIsNotNone(trust_data)
+        trust_data = self.trust_api.get_trust(trust_id)
+        self.assertEqual(new_id, trust_data['id'])
+        self.trust_api.delete_trust(trust_data['id'])
+
+    def test_get_deleted_trust(self):
+        new_id = uuid.uuid4().hex
+        trust_data = self.create_sample_trust(new_id)
+        self.assertIsNotNone(trust_data)
+        self.assertIsNone(trust_data['deleted_at'])
+        self.trust_api.delete_trust(new_id)
+        self.assertIsNone(self.trust_api.get_trust(new_id))
+        deleted_trust = self.trust_api.get_trust(trust_data['id'],
+                                                 deleted=True)
+        self.assertEqual(trust_data['id'], deleted_trust['id'])
+        self.assertIsNotNone(deleted_trust.get('deleted_at'))
+
+    def test_create_trust(self):
+        new_id = uuid.uuid4().hex
+        trust_data = self.create_sample_trust(new_id)
+
+        self.assertEqual(new_id, trust_data['id'])
+        self.assertEqual(self.trustee['id'], trust_data['trustee_user_id'])
+        self.assertEqual(self.trustor['id'], trust_data['trustor_user_id'])
+        self.assertTrue(timeutils.normalize_time(trust_data['expires_at']) >
+                        timeutils.utcnow())
+
+        self.assertEqual([{'id': 'member'},
+                          {'id': 'other'},
+                          {'id': 'browser'}], trust_data['roles'])
+
+    def test_list_trust_by_trustee(self):
+        for i in range(3):
+            self.create_sample_trust(uuid.uuid4().hex)
+        trusts = self.trust_api.list_trusts_for_trustee(self.trustee['id'])
+        self.assertEqual(3, len(trusts))
+        self.assertEqual(trusts[0]["trustee_user_id"], self.trustee['id'])
+        trusts = self.trust_api.list_trusts_for_trustee(self.trustor['id'])
+        self.assertEqual(0, len(trusts))
+
+    def test_list_trust_by_trustor(self):
+        for i in range(3):
+            self.create_sample_trust(uuid.uuid4().hex)
+        trusts = self.trust_api.list_trusts_for_trustor(self.trustor['id'])
+        self.assertEqual(3, len(trusts))
+        self.assertEqual(trusts[0]["trustor_user_id"], self.trustor['id'])
+        trusts = self.trust_api.list_trusts_for_trustor(self.trustee['id'])
+        self.assertEqual(0, len(trusts))
+
+    def test_list_trusts(self):
+        for i in range(3):
+            self.create_sample_trust(uuid.uuid4().hex)
+        trusts = self.trust_api.list_trusts()
+        self.assertEqual(3, len(trusts))
+
+    def test_trust_has_remaining_uses_positive(self):
+        # create a trust with limited uses, check that we have uses left
+        trust_data = self.create_sample_trust(uuid.uuid4().hex,
+                                              remaining_uses=5)
+        self.assertEqual(5, trust_data['remaining_uses'])
+        # create a trust with unlimited uses, check that we have uses left
+        trust_data = self.create_sample_trust(uuid.uuid4().hex)
+        self.assertIsNone(trust_data['remaining_uses'])
+
+    def test_trust_has_remaining_uses_negative(self):
+        # try to create a trust with no remaining uses, check that it fails
+        self.assertRaises(exception.ValidationError,
+                          self.create_sample_trust,
+                          uuid.uuid4().hex,
+                          remaining_uses=0)
+        # try to create a trust with negative remaining uses,
+        # check that it fails
+        self.assertRaises(exception.ValidationError,
+                          self.create_sample_trust,
+                          uuid.uuid4().hex,
+                          remaining_uses=-12)
+
+    def test_consume_use(self):
+        # consume a trust repeatedly until it has no uses anymore
+        trust_data = self.create_sample_trust(uuid.uuid4().hex,
+                                              remaining_uses=2)
+        self.trust_api.consume_use(trust_data['id'])
+        t = self.trust_api.get_trust(trust_data['id'])
+        self.assertEqual(1, t['remaining_uses'])
+        self.trust_api.consume_use(trust_data['id'])
+        # That was the last use; the trust is no longer available.
+        self.assertIsNone(self.trust_api.get_trust(trust_data['id']))
+
+
+class CatalogTests(object):
+
+    _legacy_endpoint_id_in_endpoint = False
+    _enabled_default_to_true_when_creating_endpoint = False
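+    # Backend-specific test subclasses can override these flags to match
+    # their driver's behavior (see test_update_endpoint below).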
+
+    def test_region_crud(self):
+        # create
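+        # A 255-character id exercises what is presumably the maximum
+        # region id length the backend accepts.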
+        region_id = '0' * 255
+        new_region = {
+            'id': region_id,
+            'description': uuid.uuid4().hex,
+        }
+        res = self.catalog_api.create_region(
+            new_region.copy())
+        # Ensure that a parent_region_id is not required in the supplied
+        # ref dict, but that it is returned by the API with a None value.
+        expected_region = new_region.copy()
+        expected_region['parent_region_id'] = None
+        self.assertDictEqual(res, expected_region)
+
+        # Test adding another region with the one above
+        # as its parent. We will check below whether deleting
+        # the parent successfully deletes any child regions.
+        parent_region_id = region_id
+        region_id = uuid.uuid4().hex
+        new_region = {
+            'id': region_id,
+            'description': uuid.uuid4().hex,
+            'parent_region_id': parent_region_id,
+        }
+        res = self.catalog_api.create_region(
+            new_region.copy())
+        self.assertDictEqual(new_region, res)
+
+        # list
+        regions = self.catalog_api.list_regions()
+        self.assertThat(regions, matchers.HasLength(2))
+        region_ids = [x['id'] for x in regions]
+        self.assertIn(parent_region_id, region_ids)
+        self.assertIn(region_id, region_ids)
+
+        # update
+        region_desc_update = {'description': uuid.uuid4().hex}
+        res = self.catalog_api.update_region(region_id, region_desc_update)
+        expected_region = new_region.copy()
+        expected_region['description'] = region_desc_update['description']
+        self.assertDictEqual(expected_region, res)
+
+        # delete
+        self.catalog_api.delete_region(parent_region_id)
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.delete_region,
+                          parent_region_id)
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          parent_region_id)
+        # Ensure the child is also gone...
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          region_id)
+
+    def _create_region_with_parent_id(self, parent_id=None):
+        new_region = {
+            'id': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'parent_region_id': parent_id
+        }
+        self.catalog_api.create_region(
+            new_region)
+        return new_region
+
+    def test_list_regions_filtered_by_parent_region_id(self):
+        new_region = self._create_region_with_parent_id()
+        parent_id = new_region['id']
+        new_region = self._create_region_with_parent_id(parent_id)
+        new_region = self._create_region_with_parent_id(parent_id)
+
+        # filter by parent_region_id
+        hints = driver_hints.Hints()
+        hints.add_filter('parent_region_id', parent_id)
+        regions = self.catalog_api.list_regions(hints)
+        for region in regions:
+            self.assertEqual(parent_id, region['parent_region_id'])
+
+    @tests.skip_if_cache_disabled('catalog')
+    def test_cache_layer_region_crud(self):
+        region_id = uuid.uuid4().hex
+        new_region = {
+            'id': region_id,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_region(new_region.copy())
+        updated_region = copy.deepcopy(new_region)
+        updated_region['description'] = uuid.uuid4().hex
+        # cache the result
+        self.catalog_api.get_region(region_id)
+        # update the region bypassing catalog_api
+        self.catalog_api.driver.update_region(region_id, updated_region)
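+        # The manager keeps serving the stale cached copy until the cache
+        # entry is explicitly invalidated.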
+        self.assertDictContainsSubset(new_region,
+                                      self.catalog_api.get_region(region_id))
+        self.catalog_api.get_region.invalidate(self.catalog_api, region_id)
+        self.assertDictContainsSubset(updated_region,
+                                      self.catalog_api.get_region(region_id))
+        # delete the region
+        self.catalog_api.driver.delete_region(region_id)
+        # still get the old region
+        self.assertDictContainsSubset(updated_region,
+                                      self.catalog_api.get_region(region_id))
+        self.catalog_api.get_region.invalidate(self.catalog_api, region_id)
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region, region_id)
+
+    @tests.skip_if_cache_disabled('catalog')
+    def test_invalidate_cache_when_updating_region(self):
+        region_id = uuid.uuid4().hex
+        new_region = {
+            'id': region_id,
+            'description': uuid.uuid4().hex
+        }
+        self.catalog_api.create_region(new_region)
+
+        # cache the region
+        self.catalog_api.get_region(region_id)
+
+        # update the region via catalog_api
+        new_description = {'description': uuid.uuid4().hex}
+        self.catalog_api.update_region(region_id, new_description)
+
+        # assert that we can get the new region
+        current_region = self.catalog_api.get_region(region_id)
+        self.assertEqual(new_description['description'],
+                         current_region['description'])
+
+    def test_create_region_with_duplicate_id(self):
+        region_id = uuid.uuid4().hex
+        new_region = {
+            'id': region_id,
+            'description': uuid.uuid4().hex
+        }
+        self.catalog_api.create_region(new_region)
+        # Create region again with duplicate id
+        self.assertRaises(exception.Conflict,
+                          self.catalog_api.create_region,
+                          new_region)
+
+    def test_get_region_404(self):
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          uuid.uuid4().hex)
+
+    def test_delete_region_404(self):
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.delete_region,
+                          uuid.uuid4().hex)
+
+    def test_create_region_invalid_parent_region_404(self):
+        region_id = uuid.uuid4().hex
+        new_region = {
+            'id': region_id,
+            'description': uuid.uuid4().hex,
+            'parent_region_id': 'nonexisting'
+        }
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.create_region,
+                          new_region)
+
+    def test_avoid_creating_circular_references_in_regions_update(self):
+        region_one = self._create_region_with_parent_id()
+
+        # self circle: region_one->region_one
+        self.assertRaises(exception.CircularRegionHierarchyError,
+                          self.catalog_api.update_region,
+                          region_one['id'],
+                          {'parent_region_id': region_one['id']})
+
+        # region_one->region_two->region_one
+        region_two = self._create_region_with_parent_id(region_one['id'])
+        self.assertRaises(exception.CircularRegionHierarchyError,
+                          self.catalog_api.update_region,
+                          region_one['id'],
+                          {'parent_region_id': region_two['id']})
+
+        # region_one region_two->region_three->region_four->region_two
+        region_three = self._create_region_with_parent_id(region_two['id'])
+        region_four = self._create_region_with_parent_id(region_three['id'])
+        self.assertRaises(exception.CircularRegionHierarchyError,
+                          self.catalog_api.update_region,
+                          region_two['id'],
+                          {'parent_region_id': region_four['id']})
+
+    @mock.patch.object(core.Driver,
+                       "_ensure_no_circle_in_hierarchical_regions")
+    def test_circular_regions_can_be_deleted(self, mock_ensure_on_circle):
+        # turn off the enforcement so that cycles can be created for the test
+        mock_ensure_on_circle.return_value = None
+
+        region_one = self._create_region_with_parent_id()
+
+        # self circle: region_one->region_one
+        self.catalog_api.update_region(
+            region_one['id'],
+            {'parent_region_id': region_one['id']})
+        self.catalog_api.delete_region(region_one['id'])
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          region_one['id'])
+
+        # region_one->region_two->region_one
+        region_one = self._create_region_with_parent_id()
+        region_two = self._create_region_with_parent_id(region_one['id'])
+        self.catalog_api.update_region(
+            region_one['id'],
+            {'parent_region_id': region_two['id']})
+        self.catalog_api.delete_region(region_one['id'])
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          region_one['id'])
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          region_two['id'])
+
+        # region_one->region_two->region_three->region_one
+        region_one = self._create_region_with_parent_id()
+        region_two = self._create_region_with_parent_id(region_one['id'])
+        region_three = self._create_region_with_parent_id(region_two['id'])
+        self.catalog_api.update_region(
+            region_one['id'],
+            {'parent_region_id': region_three['id']})
+        self.catalog_api.delete_region(region_two['id'])
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          region_two['id'])
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          region_one['id'])
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.get_region,
+                          region_three['id'])
+
+    def test_service_crud(self):
+        # create
+        service_id = uuid.uuid4().hex
+        new_service = {
+            'id': service_id,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        res = self.catalog_api.create_service(
+            service_id,
+            new_service.copy())
+        new_service['enabled'] = True
+        self.assertDictEqual(new_service, res)
+
+        # list
+        services = self.catalog_api.list_services()
+        self.assertIn(service_id, [x['id'] for x in services])
+
+        # update
+        service_name_update = {'name': uuid.uuid4().hex}
+        res = self.catalog_api.update_service(service_id, service_name_update)
+        expected_service = new_service.copy()
+        expected_service['name'] = service_name_update['name']
+        self.assertDictEqual(expected_service, res)
+
+        # delete
+        self.catalog_api.delete_service(service_id)
+        self.assertRaises(exception.ServiceNotFound,
+                          self.catalog_api.delete_service,
+                          service_id)
+        self.assertRaises(exception.ServiceNotFound,
+                          self.catalog_api.get_service,
+                          service_id)
+
+    def _create_random_service(self):
+        service_id = uuid.uuid4().hex
+        new_service = {
+            'id': service_id,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        return self.catalog_api.create_service(service_id, new_service.copy())
+
+    def test_service_filtering(self):
+        target_service = self._create_random_service()
+        unrelated_service1 = self._create_random_service()
+        unrelated_service2 = self._create_random_service()
+
+        # filter by type
+        hint_for_type = driver_hints.Hints()
+        hint_for_type.add_filter(name="type", value=target_service['type'])
+        services = self.catalog_api.list_services(hint_for_type)
+
+        self.assertEqual(1, len(services))
+        filtered_service = services[0]
+        self.assertEqual(target_service['type'], filtered_service['type'])
+        self.assertEqual(target_service['id'], filtered_service['id'])
+
+        # filter should have been removed, since it was already used by the
+        # backend
+        self.assertEqual(0, len(hint_for_type.filters))
+
+        # the backend shouldn't filter by name, since this is handled by the
+        # front end
+        hint_for_name = driver_hints.Hints()
+        hint_for_name.add_filter(name="name", value=target_service['name'])
+        services = self.catalog_api.list_services(hint_for_name)
+
+        self.assertEqual(3, len(services))
+
+        # filter should still be there, since it wasn't used by the backend
+        self.assertEqual(1, len(hint_for_name.filters))
+
+        self.catalog_api.delete_service(target_service['id'])
+        self.catalog_api.delete_service(unrelated_service1['id'])
+        self.catalog_api.delete_service(unrelated_service2['id'])
+
+    @tests.skip_if_cache_disabled('catalog')
+    def test_cache_layer_service_crud(self):
+        service_id = uuid.uuid4().hex
+        new_service = {
+            'id': service_id,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        res = self.catalog_api.create_service(
+            service_id,
+            new_service.copy())
+        new_service['enabled'] = True
+        self.assertDictEqual(new_service, res)
+        self.catalog_api.get_service(service_id)
+        updated_service = copy.deepcopy(new_service)
+        updated_service['description'] = uuid.uuid4().hex
+        # update bypassing catalog api
+        self.catalog_api.driver.update_service(service_id, updated_service)
+        self.assertDictContainsSubset(new_service,
+                                      self.catalog_api.get_service(service_id))
+        self.catalog_api.get_service.invalidate(self.catalog_api, service_id)
+        self.assertDictContainsSubset(updated_service,
+                                      self.catalog_api.get_service(service_id))
+
+        # delete bypassing catalog api
+        self.catalog_api.driver.delete_service(service_id)
+        self.assertDictContainsSubset(updated_service,
+                                      self.catalog_api.get_service(service_id))
+        self.catalog_api.get_service.invalidate(self.catalog_api, service_id)
+        self.assertRaises(exception.ServiceNotFound,
+                          self.catalog_api.delete_service,
+                          service_id)
+        self.assertRaises(exception.ServiceNotFound,
+                          self.catalog_api.get_service,
+                          service_id)
+
+    @tests.skip_if_cache_disabled('catalog')
+    def test_invalidate_cache_when_updating_service(self):
+        service_id = uuid.uuid4().hex
+        new_service = {
+            'id': service_id,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(
+            service_id,
+            new_service.copy())
+
+        # cache the service
+        self.catalog_api.get_service(service_id)
+
+        # update the service via catalog api
+        new_type = {'type': uuid.uuid4().hex}
+        self.catalog_api.update_service(service_id, new_type)
+
+        # assert that we can get the new service
+        current_service = self.catalog_api.get_service(service_id)
+        self.assertEqual(new_type['type'], current_service['type'])
+
+    def test_delete_service_with_endpoint(self):
+        # create a service
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service)
+
+        # create an endpoint attached to the service
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'region': uuid.uuid4().hex,
+            'interface': uuid.uuid4().hex[:8],
+            'url': uuid.uuid4().hex,
+            'service_id': service['id'],
+        }
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        # deleting the service should also delete the endpoint
+        self.catalog_api.delete_service(service['id'])
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.get_endpoint,
+                          endpoint['id'])
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.delete_endpoint,
+                          endpoint['id'])
+
+    def test_cache_layer_delete_service_with_endpoint(self):
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service)
+
+        # create an endpoint attached to the service
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'region_id': None,
+            'interface': uuid.uuid4().hex[:8],
+            'url': uuid.uuid4().hex,
+            'service_id': service['id'],
+        }
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+        # cache the result
+        self.catalog_api.get_service(service['id'])
+        self.catalog_api.get_endpoint(endpoint['id'])
+        # delete the service bypassing catalog api
+        self.catalog_api.driver.delete_service(service['id'])
+        self.assertDictContainsSubset(
+            endpoint, self.catalog_api.get_endpoint(endpoint['id']))
+        self.assertDictContainsSubset(
+            service, self.catalog_api.get_service(service['id']))
+        self.catalog_api.get_endpoint.invalidate(self.catalog_api,
+                                                 endpoint['id'])
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.get_endpoint,
+                          endpoint['id'])
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.delete_endpoint,
+                          endpoint['id'])
+        # multiple endpoints associated with a service
+        second_endpoint = {
+            'id': uuid.uuid4().hex,
+            'region_id': None,
+            'interface': uuid.uuid4().hex[:8],
+            'url': uuid.uuid4().hex,
+            'service_id': service['id'],
+        }
+        self.catalog_api.create_service(service['id'], service)
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+        self.catalog_api.create_endpoint(second_endpoint['id'],
+                                         second_endpoint)
+        self.catalog_api.delete_service(service['id'])
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.get_endpoint,
+                          endpoint['id'])
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.delete_endpoint,
+                          endpoint['id'])
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.get_endpoint,
+                          second_endpoint['id'])
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.delete_endpoint,
+                          second_endpoint['id'])
+
+    def test_get_service_404(self):
+        self.assertRaises(exception.ServiceNotFound,
+                          self.catalog_api.get_service,
+                          uuid.uuid4().hex)
+
+    def test_delete_service_404(self):
+        self.assertRaises(exception.ServiceNotFound,
+                          self.catalog_api.delete_service,
+                          uuid.uuid4().hex)
+
+    def test_create_endpoint_nonexistent_service(self):
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'service_id': uuid.uuid4().hex,
+        }
+        self.assertRaises(exception.ValidationError,
+                          self.catalog_api.create_endpoint,
+                          endpoint['id'],
+                          endpoint)
+
+    def test_update_endpoint_nonexistent_service(self):
+        dummy_service, enabled_endpoint, dummy_disabled_endpoint = (
+            self._create_endpoints())
+        new_endpoint = {
+            'service_id': uuid.uuid4().hex,
+        }
+        self.assertRaises(exception.ValidationError,
+                          self.catalog_api.update_endpoint,
+                          enabled_endpoint['id'],
+                          new_endpoint)
+
+    def test_create_endpoint_nonexistent_region(self):
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service.copy())
+
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'service_id': service['id'],
+            'interface': 'public',
+            'url': uuid.uuid4().hex,
+            'region_id': uuid.uuid4().hex,
+        }
+        self.assertRaises(exception.ValidationError,
+                          self.catalog_api.create_endpoint,
+                          endpoint['id'],
+                          endpoint)
+
+    def test_update_endpoint_nonexistent_region(self):
+        dummy_service, enabled_endpoint, dummy_disabled_endpoint = (
+            self._create_endpoints())
+        new_endpoint = {
+            'region_id': uuid.uuid4().hex,
+        }
+        self.assertRaises(exception.ValidationError,
+                          self.catalog_api.update_endpoint,
+                          enabled_endpoint['id'],
+                          new_endpoint)
+
+    def test_get_endpoint_404(self):
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.get_endpoint,
+                          uuid.uuid4().hex)
+
+    def test_delete_endpoint_404(self):
+        self.assertRaises(exception.EndpointNotFound,
+                          self.catalog_api.delete_endpoint,
+                          uuid.uuid4().hex)
+
+    def test_create_endpoint(self):
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service.copy())
+
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'region_id': None,
+            'service_id': service['id'],
+            'interface': 'public',
+            'url': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
+
+    def test_update_endpoint(self):
+        dummy_service_ref, endpoint_ref, dummy_disabled_endpoint_ref = (
+            self._create_endpoints())
+        res = self.catalog_api.update_endpoint(endpoint_ref['id'],
+                                               {'interface': 'private'})
+        expected_endpoint = endpoint_ref.copy()
+        expected_endpoint['interface'] = 'private'
+        if self._legacy_endpoint_id_in_endpoint:
+            expected_endpoint['legacy_endpoint_id'] = None
+        if self._enabled_default_to_true_when_creating_endpoint:
+            expected_endpoint['enabled'] = True
+        self.assertDictEqual(expected_endpoint, res)
+
+    def _create_endpoints(self):
+        # Creates a service and 2 endpoints for the service in the same region.
+        # The 'public' interface is enabled and the 'internal' interface is
+        # disabled.
+
+        def create_endpoint(service_id, region, **kwargs):
+            id_ = uuid.uuid4().hex
+            ref = {
+                'id': id_,
+                'interface': 'public',
+                'region_id': region,
+                'service_id': service_id,
+                'url': 'http://localhost/%s' % uuid.uuid4().hex,
+            }
+            ref.update(kwargs)
+            self.catalog_api.create_endpoint(id_, ref)
+            return ref
+
+        # Create a service for use with the endpoints.
+        service_id = uuid.uuid4().hex
+        service_ref = {
+            'id': service_id,
+            'name': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service_id, service_ref)
+
+        region = {'id': uuid.uuid4().hex}
+        self.catalog_api.create_region(region)
+
+        # Create endpoints
+        enabled_endpoint_ref = create_endpoint(service_id, region['id'])
+        disabled_endpoint_ref = create_endpoint(
+            service_id, region['id'], enabled=False, interface='internal')
+
+        return service_ref, enabled_endpoint_ref, disabled_endpoint_ref
+
+    def test_get_catalog_endpoint_disabled(self):
+        """Get back only enabled endpoints when get the v2 catalog."""
+
+        service_ref, enabled_endpoint_ref, dummy_disabled_endpoint_ref = (
+            self._create_endpoints())
+
+        user_id = uuid.uuid4().hex
+        project_id = uuid.uuid4().hex
+        catalog = self.catalog_api.get_catalog(user_id, project_id)
+
+        exp_entry = {
+            'id': enabled_endpoint_ref['id'],
+            'name': service_ref['name'],
+            'publicURL': enabled_endpoint_ref['url'],
+        }
+
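+        # The v2 catalog dict is keyed by region and then by service type.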
+        region = enabled_endpoint_ref['region_id']
+        self.assertEqual(exp_entry, catalog[region][service_ref['type']])
+
+    def test_get_v3_catalog_endpoint_disabled(self):
+        """Get back only enabled endpoints when get the v3 catalog."""
+
+        enabled_endpoint_ref = self._create_endpoints()[1]
+
+        user_id = uuid.uuid4().hex
+        project_id = uuid.uuid4().hex
+        catalog = self.catalog_api.get_v3_catalog(user_id, project_id)
+
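+        # The v3 catalog is a list of services, each carrying its own
+        # endpoint list.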
+        endpoint_ids = [x['id'] for x in catalog[0]['endpoints']]
+        self.assertEqual([enabled_endpoint_ref['id']], endpoint_ids)
+
+    @tests.skip_if_cache_disabled('catalog')
+    def test_invalidate_cache_when_updating_endpoint(self):
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service)
+
+        # create an endpoint attached to the service
+        endpoint_id = uuid.uuid4().hex
+        endpoint = {
+            'id': endpoint_id,
+            'region_id': None,
+            'interface': uuid.uuid4().hex[:8],
+            'url': uuid.uuid4().hex,
+            'service_id': service['id'],
+        }
+        self.catalog_api.create_endpoint(endpoint_id, endpoint)
+
+        # cache the endpoint
+        self.catalog_api.get_endpoint(endpoint_id)
+
+        # update the endpoint via catalog api
+        new_url = {'url': uuid.uuid4().hex}
+        self.catalog_api.update_endpoint(endpoint_id, new_url)
+
+        # assert that we can get the new endpoint
+        current_endpoint = self.catalog_api.get_endpoint(endpoint_id)
+        self.assertEqual(new_url['url'], current_endpoint['url'])
+
+
+class PolicyTests(object):
+    def _new_policy_ref(self):
+        return {
+            'id': uuid.uuid4().hex,
+            'policy': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'endpoint_id': uuid.uuid4().hex,
+        }
+
+    def assertEqualPolicies(self, a, b):
+        self.assertEqual(a['id'], b['id'])
+        self.assertEqual(a['endpoint_id'], b['endpoint_id'])
+        self.assertEqual(a['policy'], b['policy'])
+        self.assertEqual(a['type'], b['type'])
+
+    def test_create(self):
+        ref = self._new_policy_ref()
+        res = self.policy_api.create_policy(ref['id'], ref)
+        self.assertEqualPolicies(ref, res)
+
+    def test_get(self):
+        ref = self._new_policy_ref()
+        res = self.policy_api.create_policy(ref['id'], ref)
+
+        res = self.policy_api.get_policy(ref['id'])
+        self.assertEqualPolicies(ref, res)
+
+    def test_list(self):
+        ref = self._new_policy_ref()
+        self.policy_api.create_policy(ref['id'], ref)
+
+        res = self.policy_api.list_policies()
+        res = [x for x in res if x['id'] == ref['id']][0]
+        self.assertEqualPolicies(ref, res)
+
+    def test_update(self):
+        ref = self._new_policy_ref()
+        self.policy_api.create_policy(ref['id'], ref)
+        orig = ref
+
+        ref = self._new_policy_ref()
+
+        # (cannot change policy ID)
+        self.assertRaises(exception.ValidationError,
+                          self.policy_api.update_policy,
+                          orig['id'],
+                          ref)
+
+        ref['id'] = orig['id']
+        res = self.policy_api.update_policy(orig['id'], ref)
+        self.assertEqualPolicies(ref, res)
+
+    def test_delete(self):
+        ref = self._new_policy_ref()
+        self.policy_api.create_policy(ref['id'], ref)
+
+        self.policy_api.delete_policy(ref['id'])
+        self.assertRaises(exception.PolicyNotFound,
+                          self.policy_api.delete_policy,
+                          ref['id'])
+        self.assertRaises(exception.PolicyNotFound,
+                          self.policy_api.get_policy,
+                          ref['id'])
+        res = self.policy_api.list_policies()
+        self.assertFalse(len([x for x in res if x['id'] == ref['id']]))
+
+    def test_get_policy_404(self):
+        self.assertRaises(exception.PolicyNotFound,
+                          self.policy_api.get_policy,
+                          uuid.uuid4().hex)
+
+    def test_update_policy_404(self):
+        ref = self._new_policy_ref()
+        self.assertRaises(exception.PolicyNotFound,
+                          self.policy_api.update_policy,
+                          ref['id'],
+                          ref)
+
+    def test_delete_policy_404(self):
+        self.assertRaises(exception.PolicyNotFound,
+                          self.policy_api.delete_policy,
+                          uuid.uuid4().hex)
+
+
+class InheritanceTests(object):
+
+    def test_inherited_role_grants_for_user(self):
+        """Test inherited user roles.
+
+        Test Plan:
+
+        - Enable OS-INHERIT extension
+        - Create 3 roles
+        - Create a domain, with a project and a user
+        - Check no roles yet exist
+        - Assign a direct user role to the project and a (non-inherited)
+          user role to the domain
+        - Get a list of effective roles - should only get the one direct role
+        - Now add an inherited user role to the domain
+        - Get a list of effective roles - should have two roles, one
+          direct and one by virtue of the inherited user role
+        - Also get effective roles for the domain - the role marked as
+          inherited should not show up
+
+        """
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        role_list = []
+        for _ in range(3):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(0, len(roles_ref))
+
+        # Create the first two roles - the domain one is not inherited
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[0]['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[1]['id'])
+
+        # Now get the effective roles for the user and project, this
+        # should only include the direct role assignment on the project
+        combined_list = self.assignment_api.get_roles_for_user_and_project(
+            user1['id'], project1['id'])
+        self.assertEqual(1, len(combined_list))
+        self.assertIn(role_list[0]['id'], combined_list)
+
+        # Now add an inherited role on the domain
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[2]['id'],
+                                         inherited_to_projects=True)
+
+        # Now get the effective roles for the user and project again, this
+        # should now include the inherited role on the domain
+        combined_list = self.assignment_api.get_roles_for_user_and_project(
+            user1['id'], project1['id'])
+        self.assertEqual(2, len(combined_list))
+        self.assertIn(role_list[0]['id'], combined_list)
+        self.assertIn(role_list[2]['id'], combined_list)
+
+        # Finally, check that the inherited role does not appear as a valid
+        # directly assigned role on the domain itself
+        combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
+            user1['id'], domain1['id'])
+        self.assertEqual(1, len(combined_role_list))
+        self.assertIn(role_list[1]['id'], combined_role_list)
+
+    def test_inherited_role_grants_for_group(self):
+        """Test inherited group roles.
+
+        Test Plan:
+
+        - Enable OS-INHERIT extension
+        - Create 4 roles
+        - Create a domain, with a project, user and two groups
+        - Make the user a member of both groups
+        - Check no roles yet exist
+        - Assign a direct user role to the project and a (non-inherited)
+          group role on the domain
+        - Get a list of effective roles - should only get the one direct role
+        - Now add two inherited group roles to the domain
+        - Get a list of effective roles - should have three roles, one
+          direct and two by virtue of inherited group roles
+
+        """
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        role_list = []
+        for _ in range(4):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        user1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group1 = self.identity_api.create_group(group1)
+        group2 = {'name': uuid.uuid4().hex, 'domain_id': domain1['id'],
+                  'enabled': True}
+        group2 = self.identity_api.create_group(group2)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group1['id'])
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group2['id'])
+
+        roles_ref = self.assignment_api.list_grants(
+            user_id=user1['id'],
+            project_id=project1['id'])
+        self.assertEqual(0, len(roles_ref))
+
+        # Create two roles - the domain one is not inherited
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project1['id'],
+                                         role_id=role_list[0]['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[1]['id'])
+
+        # Now get the effective roles for the user and project, this
+        # should only include the direct role assignment on the project
+        combined_list = self.assignment_api.get_roles_for_user_and_project(
+            user1['id'], project1['id'])
+        self.assertEqual(1, len(combined_list))
+        self.assertIn(role_list[0]['id'], combined_list)
+
+        # Now add two more group roles, both inherited, to the domain
+        self.assignment_api.create_grant(group_id=group2['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[2]['id'],
+                                         inherited_to_projects=True)
+        self.assignment_api.create_grant(group_id=group2['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role_list[3]['id'],
+                                         inherited_to_projects=True)
+
+        # Now get the effective roles for the user and project again, this
+        # should now include the inherited roles on the domain
+        combined_list = self.assignment_api.get_roles_for_user_and_project(
+            user1['id'], project1['id'])
+        self.assertEqual(3, len(combined_list))
+        self.assertIn(role_list[0]['id'], combined_list)
+        self.assertIn(role_list[2]['id'], combined_list)
+        self.assertIn(role_list[3]['id'], combined_list)
+
+    def test_list_projects_for_user_with_inherited_grants(self):
+        """Test inherited user roles.
+
+        Test Plan:
+
+        - Enable OS-INHERIT extension
+        - Create a domain, with two projects and a user
+        - Assign an inherited user role on the domain, as well as a direct
+          user role to a separate project in a different domain
+        - Get a list of projects for user, should return all three projects
+
+        """
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                 'domain_id': domain['id'], 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project2['id'], project2)
+
+        # Create 2 grants, one on a project and one inherited grant
+        # on the domain
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain['id'],
+                                         role_id=self.role_admin['id'],
+                                         inherited_to_projects=True)
+        # Should get back all three projects, one by virtue of the direct
+        # grant, plus both projects in the domain
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertEqual(3, len(user_projects))
+
+    def test_list_projects_for_user_with_inherited_user_project_grants(self):
+        """Test inherited role assignments for users on nested projects.
+
+        Test Plan:
+
+        - Enable OS-INHERIT extension
+        - Create a hierarchy of projects with one root and one leaf project
+        - Assign an inherited user role on root project
+        - Assign a non-inherited user role on root project
+        - Get a list of projects for user, should return both projects
+        - Disable OS-INHERIT extension
+        - Get a list of projects for user, should return only root project
+
+        """
+        # Enable OS-INHERIT extension
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        root_project = {'id': uuid.uuid4().hex,
+                        'description': '',
+                        'domain_id': DEFAULT_DOMAIN_ID,
+                        'enabled': True,
+                        'name': uuid.uuid4().hex,
+                        'parent_id': None}
+        self.resource_api.create_project(root_project['id'], root_project)
+        leaf_project = {'id': uuid.uuid4().hex,
+                        'description': '',
+                        'domain_id': DEFAULT_DOMAIN_ID,
+                        'enabled': True,
+                        'name': uuid.uuid4().hex,
+                        'parent_id': root_project['id']}
+        self.resource_api.create_project(leaf_project['id'], leaf_project)
+
+        user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                'domain_id': DEFAULT_DOMAIN_ID, 'enabled': True}
+        user = self.identity_api.create_user(user)
+
+        # Grant inherited user role
+        self.assignment_api.create_grant(user_id=user['id'],
+                                         project_id=root_project['id'],
+                                         role_id=self.role_admin['id'],
+                                         inherited_to_projects=True)
+        # Grant non-inherited user role
+        self.assignment_api.create_grant(user_id=user['id'],
+                                         project_id=root_project['id'],
+                                         role_id=self.role_member['id'])
+        # Should get back both projects: the root project via the direct
+        # role assignment and the leaf project via the inherited one
+        user_projects = self.assignment_api.list_projects_for_user(user['id'])
+        self.assertEqual(2, len(user_projects))
+        self.assertIn(root_project, user_projects)
+        self.assertIn(leaf_project, user_projects)
+
+        # Disable OS-INHERIT extension
+        self.config_fixture.config(group='os_inherit', enabled=False)
+        # Should get back just the root project, due to the direct role
+        # assignment
+        user_projects = self.assignment_api.list_projects_for_user(user['id'])
+        self.assertEqual(1, len(user_projects))
+        self.assertIn(root_project, user_projects)
+
+    def test_list_projects_for_user_with_inherited_group_grants(self):
+        """Test inherited group roles.
+
+        Test Plan:
+
+        - Enable OS-INHERIT extension
+        - Create two domains, each with two projects
+        - Create a user and group
+        - Make the user a member of the group
+        - Assign a user role to two projects, an inherited user role on one
+          domain and an inherited group role on the other domain
+        - Get a list of projects for user, should return both pairs of
+          projects from the two domains, plus the one separate project
+
+        """
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain2['id'], domain2)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project2['id'], project2)
+        project3 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain2['id']}
+        self.resource_api.create_project(project3['id'], project3)
+        project4 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain2['id']}
+        self.resource_api.create_project(project4['id'], project4)
+        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                 'domain_id': domain['id'], 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group1 = self.identity_api.create_group(group1)
+        self.identity_api.add_user_to_group(user1['id'], group1['id'])
+
+        # Create 4 grants:
+        # - one user grant on a project in domain2
+        # - one user grant on a project in the default domain
+        # - one inherited user grant on domain
+        # - one inherited group grant on domain2
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=project3['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         domain_id=domain['id'],
+                                         role_id=self.role_admin['id'],
+                                         inherited_to_projects=True)
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=self.role_admin['id'],
+                                         inherited_to_projects=True)
+        # Should get back all five projects, but without a duplicate for
+        # project3 (since it has both a direct user role and an inherited role)
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertEqual(5, len(user_projects))
+
+    def test_list_projects_for_user_with_inherited_group_project_grants(self):
+        """Test inherited role assignments for groups on nested projects.
+
+        Test Plan:
+
+        - Enable OS-INHERIT extension
+        - Create a hierarchy of projects with one root and one leaf project
+        - Assign an inherited group role on root project
+        - Assign a non-inherited group role on root project
+        - Get a list of projects for user, should return both projects
+        - Disable OS-INHERIT extension
+        - Get a list of projects for user, should return only root project
+
+        """
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        root_project = {'id': uuid.uuid4().hex,
+                        'description': '',
+                        'domain_id': DEFAULT_DOMAIN_ID,
+                        'enabled': True,
+                        'name': uuid.uuid4().hex,
+                        'parent_id': None}
+        self.resource_api.create_project(root_project['id'], root_project)
+        leaf_project = {'id': uuid.uuid4().hex,
+                        'description': '',
+                        'domain_id': DEFAULT_DOMAIN_ID,
+                        'enabled': True,
+                        'name': uuid.uuid4().hex,
+                        'parent_id': root_project['id']}
+        self.resource_api.create_project(leaf_project['id'], leaf_project)
+
+        user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                'domain_id': DEFAULT_DOMAIN_ID, 'enabled': True}
+        user = self.identity_api.create_user(user)
+
+        group = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID}
+        group = self.identity_api.create_group(group)
+        self.identity_api.add_user_to_group(user['id'], group['id'])
+
+        # Grant inherited group role
+        self.assignment_api.create_grant(group_id=group['id'],
+                                         project_id=root_project['id'],
+                                         role_id=self.role_admin['id'],
+                                         inherited_to_projects=True)
+        # Grant non-inherited group role
+        self.assignment_api.create_grant(group_id=group['id'],
+                                         project_id=root_project['id'],
+                                         role_id=self.role_member['id'])
+        # Should get back both projects: the root project via the direct
+        # role assignment and the leaf project via the inherited one
+        user_projects = self.assignment_api.list_projects_for_user(user['id'])
+        self.assertEqual(2, len(user_projects))
+        self.assertIn(root_project, user_projects)
+        self.assertIn(leaf_project, user_projects)
+
+        # Disable OS-INHERIT extension
+        self.config_fixture.config(group='os_inherit', enabled=False)
+        # Should get back just the root project, due to the direct role
+        # assignment
+        user_projects = self.assignment_api.list_projects_for_user(user['id'])
+        self.assertEqual(1, len(user_projects))
+        self.assertIn(root_project, user_projects)
+
+
+class FilterTests(filtering.FilterTests):
+    def test_list_entities_filtered(self):
+        for entity in ['user', 'group', 'project']:
+            # Create 20 entities
+            entity_list = self._create_test_data(entity, 20)
+
+            # Try filtering to get exactly one item out of the list
+            hints = driver_hints.Hints()
+            hints.add_filter('name', entity_list[10]['name'])
+            entities = self._list_entities(entity)(hints=hints)
+            self.assertEqual(1, len(entities))
+            self.assertEqual(entities[0]['id'], entity_list[10]['id'])
+            # Check the driver has removed the filter from the list hints
+            self.assertFalse(hints.get_exact_filter_by_name('name'))
+            self._delete_test_data(entity, entity_list)
+
+    def test_list_users_inexact_filtered(self):
+        # Create 20 users, some with specific names. We set the names at create
+        # time (rather than updating them), since the LDAP driver does not
+        # support name updates.
+        user_name_data = {
+            # user index: name for user
+            5: 'The',
+            6: 'The Ministry',
+            7: 'The Ministry of',
+            8: 'The Ministry of Silly',
+            9: 'The Ministry of Silly Walks',
+            # ...and one that is useful for case-insensitivity testing
+            10: 'The ministry of silly walks OF'
+        }
+        user_list = self._create_test_data(
+            'user', 20, domain_id=DEFAULT_DOMAIN_ID, name_dict=user_name_data)
+
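+        # A case-insensitive 'contains' match on 'ministry' should hit
+        # users 6-10, i.e. every name containing 'Ministry' or 'ministry'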
+        hints = driver_hints.Hints()
+        hints.add_filter('name', 'ministry', comparator='contains')
+        users = self.identity_api.list_users(hints=hints)
+        self.assertEqual(5, len(users))
+        self._match_with_list(users, user_list,
+                              list_start=6, list_end=11)
+        # TODO(henry-nash) Check inexact filter has been removed.
+
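+        # A 'startswith' match on 'The' should hit users 5-10, six in total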
+        hints = driver_hints.Hints()
+        hints.add_filter('name', 'The', comparator='startswith')
+        users = self.identity_api.list_users(hints=hints)
+        self.assertEqual(6, len(users))
+        self._match_with_list(users, user_list,
+                              list_start=5, list_end=11)
+        # TODO(henry-nash) Check inexact filter has been removed.
+
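+        # An 'endswith' match on 'of' should hit only users 7 and 10; the
+        # trailing 'OF' on user 10 matches because the comparison is case
+        # insensitive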
+        hints = driver_hints.Hints()
+        hints.add_filter('name', 'of', comparator='endswith')
+        users = self.identity_api.list_users(hints=hints)
+        self.assertEqual(2, len(users))
+        # We can't assume we will get back the users in any particular order
+        self.assertIn(user_list[7]['id'], [users[0]['id'], users[1]['id']])
+        self.assertIn(user_list[10]['id'], [users[0]['id'], users[1]['id']])
+        # TODO(henry-nash) Check inexact filter has been removed.
+
+        # TODO(henry-nash): Add some case sensitive tests.  However,
+        # these would be hard to validate currently, since:
+        #
+        # For SQL, the issue is that MySQL, by default, is installed in
+        # case-insensitive mode (which is what our SQL backend tests run
+        # against by default).  For production deployments, OpenStack
+        # assumes a case-sensitive database.  For these tests, therefore, we
+        # need to be able to check the sensitivity of the database so as to
+        # know whether to run case-sensitive tests here.
+        #
+        # For LDAP/AD, although dependent on the schema being used, attributes
+        # are typically configured to be case aware, but not case sensitive.
+
+        self._delete_test_data('user', user_list)
+
+    def test_groups_for_user_filtered(self):
+        """Test use of filtering doesn't break groups_for_user listing.
+
+        Some backends may use filtering to achieve the list of groups for a
+        user, so test that it can combine a second filter.
+
+        Test Plan:
+
+        - Create 10 groups, some with names we can filter on
+        - Create 2 users
+        - Assign 1 of those users to most of the groups, including some of the
+          well known named ones
+        - Assign the other user to other groups as spoilers
+        - Ensure that when we list groups for the user with a filter on the
+          group name, both restrictions are enforced on what is returned.
+
+        """
+
+        number_of_groups = 10
+        group_name_data = {
+            # entity index: name for entity
+            5: 'The',
+            6: 'The Ministry',
+            9: 'The Ministry of Silly Walks',
+        }
+        group_list = self._create_test_data(
+            'group', number_of_groups,
+            domain_id=DEFAULT_DOMAIN_ID, name_dict=group_name_data)
+        user_list = self._create_test_data('user', 2)
+
+        for group in range(7):
+            # Create memberships for the first user, including two of the
+            # three groups with well-known names
+            self.identity_api.add_user_to_group(user_list[0]['id'],
+                                                group_list[group]['id'])
+        # ...and some spoiler memberships
+        for group in range(7, number_of_groups):
+            self.identity_api.add_user_to_group(user_list[1]['id'],
+                                                group_list[group]['id'])
+
+        hints = driver_hints.Hints()
+        hints.add_filter('name', 'The', comparator='startswith')
+        groups = self.identity_api.list_groups_for_user(
+            user_list[0]['id'], hints=hints)
+        # We should only get back 2 out of the 3 groups that start with 'The'
+        # hence showing that both "filters" have been applied
+        self.assertThat(len(groups), matchers.Equals(2))
+        self.assertIn(group_list[5]['id'], [groups[0]['id'], groups[1]['id']])
+        self.assertIn(group_list[6]['id'], [groups[0]['id'], groups[1]['id']])
+        self._delete_test_data('user', user_list)
+        self._delete_test_data('group', group_list)
+
+
+class LimitTests(filtering.FilterTests):
+    ENTITIES = ['user', 'group', 'project']
+
+    def setUp(self):
+        """Setup for Limit Test Cases."""
+
+        self.domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(self.domain1['id'], self.domain1)
+        self.addCleanup(self.clean_up_domain)
+
+        self.entity_lists = {}
+        self.domain1_entity_lists = {}
+
+        for entity in self.ENTITIES:
+            # Create 20 entities, 14 of which are in domain1
+            self.entity_lists[entity] = self._create_test_data(entity, 6)
+            self.domain1_entity_lists[entity] = self._create_test_data(
+                entity, 14, self.domain1['id'])
+        self.addCleanup(self.clean_up_entities)
+
+    def clean_up_domain(self):
+        """Clean up domain test data from Limit Test Cases."""
+
+        self.domain1['enabled'] = False
+        self.resource_api.update_domain(self.domain1['id'], self.domain1)
+        self.resource_api.delete_domain(self.domain1['id'])
+        del self.domain1
+
+    def clean_up_entities(self):
+        """Clean up entity test data from Limit Test Cases."""
+        for entity in self.ENTITIES:
+            self._delete_test_data(entity, self.entity_lists[entity])
+            self._delete_test_data(entity, self.domain1_entity_lists[entity])
+        del self.entity_lists
+        del self.domain1_entity_lists
+
+    def _test_list_entity_filtered_and_limited(self, entity):
+        self.config_fixture.config(list_limit=10)
+        # Should get back just 10 entities in domain1
+        hints = driver_hints.Hints()
+        hints.add_filter('domain_id', self.domain1['id'])
+        entities = self._list_entities(entity)(hints=hints)
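+        # The list call records the limit it enforced on the hints object
+        # (hints.limit), including whether the result was truncated, so we
+        # can assert against it directly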
+        self.assertEqual(hints.limit['limit'], len(entities))
+        self.assertTrue(hints.limit['truncated'])
+        self._match_with_list(entities, self.domain1_entity_lists[entity])
+
+        # Override with driver specific limit
+        if entity == 'project':
+            self.config_fixture.config(group='resource', list_limit=5)
+        else:
+            self.config_fixture.config(group='identity', list_limit=5)
+
+        # Should get back just 5 entities in domain1
+        hints = driver_hints.Hints()
+        hints.add_filter('domain_id', self.domain1['id'])
+        entities = self._list_entities(entity)(hints=hints)
+        self.assertEqual(hints.limit['limit'], len(entities))
+        self._match_with_list(entities, self.domain1_entity_lists[entity])
+
+        # Finally, let's pretend we want to get the full list of entities,
+        # even with the limits set, as part of some internal calculation.
+        # Calling the API without a hints list should achieve this, and
+        # return at least the 20 entries we created (there may be other
+        # entities lying around created by other tests/setup).
+        entities = self._list_entities(entity)()
+        self.assertTrue(len(entities) >= 20)
+
+    def test_list_users_filtered_and_limited(self):
+        self._test_list_entity_filtered_and_limited('user')
+
+    def test_list_groups_filtered_and_limited(self):
+        self._test_list_entity_filtered_and_limited('group')
+
+    def test_list_projects_filtered_and_limited(self):
+        self._test_list_entity_filtered_and_limited('project')
diff --git a/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py b/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
new file mode 100644 (file)
index 0000000..cc41d97
--- /dev/null
@@ -0,0 +1,247 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from testtools import matchers
+
+from keystone import exception
+
+
+class PolicyAssociationTests(object):
+
+    def _assert_correct_policy(self, endpoint, policy):
+        ref = (
+            self.endpoint_policy_api.get_policy_for_endpoint(endpoint['id']))
+        self.assertEqual(policy['id'], ref['id'])
+
+    def _assert_correct_endpoints(self, policy, endpoint_list):
+        endpoint_id_list = [ep['id'] for ep in endpoint_list]
+        endpoints = (
+            self.endpoint_policy_api.list_endpoints_for_policy(policy['id']))
+        self.assertThat(endpoints, matchers.HasLength(len(endpoint_list)))
+        for endpoint in endpoints:
+            self.assertIn(endpoint['id'], endpoint_id_list)
+
+    def load_sample_data(self):
+        """Create sample data to test policy associations.
+
+        The following data is created:
+
+        - 3 regions, in a hierarchy, 0 -> 1 -> 2 (where 0 is top)
+        - 3 services
+        - 6 endpoints, 2 in each region, with a mixture of services:
+          0 - region 0, Service 0
+          1 - region 0, Service 1
+          2 - region 1, Service 1
+          3 - region 1, Service 2
+          4 - region 2, Service 2
+          5 - region 2, Service 0
+
+        """
+
+        def new_endpoint(region_id, service_id):
+            endpoint = {'id': uuid.uuid4().hex, 'interface': 'test',
+                        'region_id': region_id, 'service_id': service_id,
+                        'url': '/url'}
+            self.endpoint.append(self.catalog_api.create_endpoint(
+                endpoint['id'], endpoint))
+
+        self.policy = []
+        self.endpoint = []
+        self.service = []
+        self.region = []
+        for i in range(3):
+            policy = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex,
+                      'blob': {'data': uuid.uuid4().hex}}
+            self.policy.append(self.policy_api.create_policy(policy['id'],
+                                                             policy))
+            service = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex}
+            self.service.append(self.catalog_api.create_service(service['id'],
+                                                                service))
+            region = {'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex}
+            # Link the 3 regions together as a hierarchy, [0] at the top
+            if i != 0:
+                region['parent_region_id'] = self.region[i - 1]['id']
+            self.region.append(self.catalog_api.create_region(region))
+
+        new_endpoint(self.region[0]['id'], self.service[0]['id'])
+        new_endpoint(self.region[0]['id'], self.service[1]['id'])
+        new_endpoint(self.region[1]['id'], self.service[1]['id'])
+        new_endpoint(self.region[1]['id'], self.service[2]['id'])
+        new_endpoint(self.region[2]['id'], self.service[2]['id'])
+        new_endpoint(self.region[2]['id'], self.service[0]['id'])
+
+    def test_policy_to_endpoint_association_crud(self):
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
+        self.endpoint_policy_api.check_policy_association(
+            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
+        self.endpoint_policy_api.delete_policy_association(
+            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.check_policy_association,
+                          self.policy[0]['id'],
+                          endpoint_id=self.endpoint[0]['id'])
+
+    def test_overwriting_policy_to_endpoint_association(self):
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[1]['id'], endpoint_id=self.endpoint[0]['id'])
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.check_policy_association,
+                          self.policy[0]['id'],
+                          endpoint_id=self.endpoint[0]['id'])
+        self.endpoint_policy_api.check_policy_association(
+            self.policy[1]['id'], endpoint_id=self.endpoint[0]['id'])
+
+    def test_invalid_policy_to_endpoint_association(self):
+        self.assertRaises(exception.InvalidPolicyAssociation,
+                          self.endpoint_policy_api.create_policy_association,
+                          self.policy[0]['id'])
+        self.assertRaises(exception.InvalidPolicyAssociation,
+                          self.endpoint_policy_api.create_policy_association,
+                          self.policy[0]['id'],
+                          endpoint_id=self.endpoint[0]['id'],
+                          region_id=self.region[0]['id'])
+        self.assertRaises(exception.InvalidPolicyAssociation,
+                          self.endpoint_policy_api.create_policy_association,
+                          self.policy[0]['id'],
+                          endpoint_id=self.endpoint[0]['id'],
+                          service_id=self.service[0]['id'])
+        self.assertRaises(exception.InvalidPolicyAssociation,
+                          self.endpoint_policy_api.create_policy_association,
+                          self.policy[0]['id'],
+                          region_id=self.region[0]['id'])
+
+    def test_policy_to_explicit_endpoint_association(self):
+        # Associate policy 0 with endpoint 0
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
+        self._assert_correct_policy(self.endpoint[0], self.policy[0])
+        self._assert_correct_endpoints(self.policy[0], [self.endpoint[0]])
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.get_policy_for_endpoint,
+                          uuid.uuid4().hex)
+
+    def test_policy_to_service_association(self):
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], service_id=self.service[0]['id'])
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[1]['id'], service_id=self.service[1]['id'])
+
+        # Endpoints 0 and 5 are part of service 0
+        self._assert_correct_policy(self.endpoint[0], self.policy[0])
+        self._assert_correct_policy(self.endpoint[5], self.policy[0])
+        self._assert_correct_endpoints(
+            self.policy[0], [self.endpoint[0], self.endpoint[5]])
+
+        # Endpoints 1 and 2 are part of service 1
+        self._assert_correct_policy(self.endpoint[1], self.policy[1])
+        self._assert_correct_policy(self.endpoint[2], self.policy[1])
+        self._assert_correct_endpoints(
+            self.policy[1], [self.endpoint[1], self.endpoint[2]])
+
+    def test_policy_to_region_and_service_association(self):
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], service_id=self.service[0]['id'],
+            region_id=self.region[0]['id'])
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[1]['id'], service_id=self.service[1]['id'],
+            region_id=self.region[1]['id'])
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[2]['id'], service_id=self.service[2]['id'],
+            region_id=self.region[2]['id'])
+
+        # Endpoint 0 is in region 0 with service 0, so should get policy 0
+        self._assert_correct_policy(self.endpoint[0], self.policy[0])
+        # Endpoint 5 is in Region 2 with service 0, so should also get
+        # policy 0 by searching up the tree to Region 0
+        self._assert_correct_policy(self.endpoint[5], self.policy[0])
+
+        # Looking the other way round, policy 2 should only be in use by
+        # endpoint 4, since that's the only endpoint in region 2 with the
+        # correct service
+        self._assert_correct_endpoints(
+            self.policy[2], [self.endpoint[4]])
+        # Policy 1 should only be in use by endpoint 2, since that's the only
+        # endpoint in region 1 (and region 2 below it) with the correct service
+        self._assert_correct_endpoints(
+            self.policy[1], [self.endpoint[2]])
+        # Policy 0 should be in use by endpoint 0, as well as 5 (since 5 is
+        # of the correct service and in region 2 below it)
+        self._assert_correct_endpoints(
+            self.policy[0], [self.endpoint[0], self.endpoint[5]])
+
+    def test_delete_association_by_entity(self):
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
+        self.endpoint_policy_api.delete_association_by_endpoint(
+            self.endpoint[0]['id'])
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.check_policy_association,
+                          self.policy[0]['id'],
+                          endpoint_id=self.endpoint[0]['id'])
+        # Make sure deleting it again is silent - since this method is used
+        # in response to notifications by the controller.
+        self.endpoint_policy_api.delete_association_by_endpoint(
+            self.endpoint[0]['id'])
+
+        # Now try with service - ensure both combined region & service
+        # associations and explicit service ones are removed
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], service_id=self.service[0]['id'],
+            region_id=self.region[0]['id'])
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[1]['id'], service_id=self.service[0]['id'],
+            region_id=self.region[1]['id'])
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], service_id=self.service[0]['id'])
+
+        self.endpoint_policy_api.delete_association_by_service(
+            self.service[0]['id'])
+
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.check_policy_association,
+                          self.policy[0]['id'],
+                          service_id=self.service[0]['id'],
+                          region_id=self.region[0]['id'])
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.check_policy_association,
+                          self.policy[1]['id'],
+                          service_id=self.service[0]['id'],
+                          region_id=self.region[1]['id'])
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.check_policy_association,
+                          self.policy[0]['id'],
+                          service_id=self.service[0]['id'])
+
+        # Finally, check delete by region
+        self.endpoint_policy_api.create_policy_association(
+            self.policy[0]['id'], service_id=self.service[0]['id'],
+            region_id=self.region[0]['id'])
+
+        self.endpoint_policy_api.delete_association_by_region(
+            self.region[0]['id'])
+
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.check_policy_association,
+                          self.policy[0]['id'],
+                          service_id=self.service[0]['id'],
+                          region_id=self.region[0]['id'])
+        self.assertRaises(exception.NotFound,
+                          self.endpoint_policy_api.check_policy_association,
+                          self.policy[0]['id'],
+                          service_id=self.service[0]['id'])
diff --git a/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy_sql.py b/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy_sql.py
new file mode 100644 (file)
index 0000000..dab0285
--- /dev/null
@@ -0,0 +1,37 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import sql
+from keystone.tests.unit import test_backend_endpoint_policy
+from keystone.tests.unit import test_backend_sql
+
+
+class SqlPolicyAssociationTable(test_backend_sql.SqlModels):
+    """Set of tests for checking SQL Policy Association Mapping."""
+
+    def test_policy_association_mapping(self):
+        cols = (('policy_id', sql.String, 64),
+                ('endpoint_id', sql.String, 64),
+                ('service_id', sql.String, 64),
+                ('region_id', sql.String, 64))
+        self.assertExpectedSchema('policy_association', cols)
+
+
+class SqlPolicyAssociationTests(
+        test_backend_sql.SqlTests,
+        test_backend_endpoint_policy.PolicyAssociationTests):
+
+    def load_fixtures(self, fixtures):
+        super(SqlPolicyAssociationTests, self).load_fixtures(fixtures)
+        self.load_sample_data()
diff --git a/keystone-moon/keystone/tests/unit/test_backend_federation_sql.py b/keystone-moon/keystone/tests/unit/test_backend_federation_sql.py
new file mode 100644 (file)
index 0000000..48ebad6
--- /dev/null
@@ -0,0 +1,46 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import sql
+from keystone.tests.unit import test_backend_sql
+
+
+class SqlFederation(test_backend_sql.SqlModels):
+    """Set of tests for checking SQL Federation."""
+
+    def test_identity_provider(self):
+        cols = (('id', sql.String, 64),
+                ('remote_id', sql.String, 256),
+                ('enabled', sql.Boolean, None),
+                ('description', sql.Text, None))
+        self.assertExpectedSchema('identity_provider', cols)
+
+    def test_federated_protocol(self):
+        cols = (('id', sql.String, 64),
+                ('idp_id', sql.String, 64),
+                ('mapping_id', sql.String, 64))
+        self.assertExpectedSchema('federation_protocol', cols)
+
+    def test_mapping(self):
+        cols = (('id', sql.String, 64),
+                ('rules', sql.JsonBlob, None))
+        self.assertExpectedSchema('mapping', cols)
+
+    def test_service_provider(self):
+        cols = (('auth_url', sql.String, 256),
+                ('id', sql.String, 64),
+                ('enabled', sql.Boolean, None),
+                ('description', sql.Text, None),
+                ('sp_url', sql.String, 256))
+        self.assertExpectedSchema('service_provider', cols)
diff --git a/keystone-moon/keystone/tests/unit/test_backend_id_mapping_sql.py b/keystone-moon/keystone/tests/unit/test_backend_id_mapping_sql.py
new file mode 100644 (file)
index 0000000..6b691e5
--- /dev/null
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from testtools import matchers
+
+from keystone.common import sql
+from keystone.identity.mapping_backends import mapping
+from keystone.tests.unit import identity_mapping as mapping_sql
+from keystone.tests.unit import test_backend_sql
+
+
+class SqlIDMappingTable(test_backend_sql.SqlModels):
+    """Set of tests for checking SQL Identity ID Mapping."""
+
+    def test_id_mapping(self):
+        cols = (('public_id', sql.String, 64),
+                ('domain_id', sql.String, 64),
+                ('local_id', sql.String, 64),
+                ('entity_type', sql.Enum, None))
+        self.assertExpectedSchema('id_mapping', cols)
+
+
+class SqlIDMapping(test_backend_sql.SqlTests):
+
+    def setUp(self):
+        super(SqlIDMapping, self).setUp()
+        self.load_sample_data()
+
+    def load_sample_data(self):
+        self.addCleanup(self.clean_sample_data)
+        domainA = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.domainA = self.resource_api.create_domain(domainA['id'], domainA)
+        domainB = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.domainB = self.resource_api.create_domain(domainB['id'], domainB)
+
+    def clean_sample_data(self):
+        if hasattr(self, 'domainA'):
+            self.domainA['enabled'] = False
+            self.resource_api.update_domain(self.domainA['id'], self.domainA)
+            self.resource_api.delete_domain(self.domainA['id'])
+        if hasattr(self, 'domainB'):
+            self.domainB['enabled'] = False
+            self.resource_api.update_domain(self.domainB['id'], self.domainB)
+            self.resource_api.delete_domain(self.domainB['id'])
+
+    def test_invalid_public_key(self):
+        self.assertIsNone(self.id_mapping_api.get_id_mapping(uuid.uuid4().hex))
+
+    def test_id_mapping_crud(self):
+        initial_mappings = len(mapping_sql.list_id_mappings())
+        local_id1 = uuid.uuid4().hex
+        local_id2 = uuid.uuid4().hex
+        local_entity1 = {'domain_id': self.domainA['id'],
+                         'local_id': local_id1,
+                         'entity_type': mapping.EntityType.USER}
+        local_entity2 = {'domain_id': self.domainB['id'],
+                         'local_id': local_id2,
+                         'entity_type': mapping.EntityType.GROUP}
+
+        # Check no mappings for the new local entities
+        self.assertIsNone(self.id_mapping_api.get_public_id(local_entity1))
+        self.assertIsNone(self.id_mapping_api.get_public_id(local_entity2))
+
+        # Create the new mappings and then read them back
+        public_id1 = self.id_mapping_api.create_id_mapping(local_entity1)
+        public_id2 = self.id_mapping_api.create_id_mapping(local_entity2)
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings + 2))
+        self.assertEqual(
+            public_id1, self.id_mapping_api.get_public_id(local_entity1))
+        self.assertEqual(
+            public_id2, self.id_mapping_api.get_public_id(local_entity2))
+
+        local_id_ref = self.id_mapping_api.get_id_mapping(public_id1)
+        self.assertEqual(self.domainA['id'], local_id_ref['domain_id'])
+        self.assertEqual(local_id1, local_id_ref['local_id'])
+        self.assertEqual(mapping.EntityType.USER, local_id_ref['entity_type'])
+        # Check we have really created a new public ID
+        self.assertNotEqual(local_id1, public_id1)
+
+        local_id_ref = self.id_mapping_api.get_id_mapping(public_id2)
+        self.assertEqual(self.domainB['id'], local_id_ref['domain_id'])
+        self.assertEqual(local_id2, local_id_ref['local_id'])
+        self.assertEqual(mapping.EntityType.GROUP, local_id_ref['entity_type'])
+        # Check we have really created a new public ID
+        self.assertNotEqual(local_id2, public_id2)
+
+        # Create another mapping, this time specifying a public ID to use
+        new_public_id = uuid.uuid4().hex
+        public_id3 = self.id_mapping_api.create_id_mapping(
+            {'domain_id': self.domainB['id'], 'local_id': local_id2,
+             'entity_type': mapping.EntityType.USER},
+            public_id=new_public_id)
+        self.assertEqual(new_public_id, public_id3)
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings + 3))
+
+        # Delete the mappings we created, and make sure the mapping count
+        # goes back to where it was
+        self.id_mapping_api.delete_id_mapping(public_id1)
+        self.id_mapping_api.delete_id_mapping(public_id2)
+        self.id_mapping_api.delete_id_mapping(public_id3)
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings))
+
+    def test_id_mapping_handles_unicode(self):
+        initial_mappings = len(mapping_sql.list_id_mappings())
+        local_id = u'fäké1'
+        local_entity = {'domain_id': self.domainA['id'],
+                        'local_id': local_id,
+                        'entity_type': mapping.EntityType.USER}
+
+        # Check no mappings for the new local entity
+        self.assertIsNone(self.id_mapping_api.get_public_id(local_entity))
+
+        # Create the new mapping and then read it back
+        public_id = self.id_mapping_api.create_id_mapping(local_entity)
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings + 1))
+        self.assertEqual(
+            public_id, self.id_mapping_api.get_public_id(local_entity))
+
+    def test_delete_public_id_is_silent(self):
+        # Test that deleting an unknown public ID is silent
+        self.id_mapping_api.delete_id_mapping(uuid.uuid4().hex)
+
+    def test_purge_mappings(self):
+        initial_mappings = len(mapping_sql.list_id_mappings())
+        local_id1 = uuid.uuid4().hex
+        local_id2 = uuid.uuid4().hex
+        local_id3 = uuid.uuid4().hex
+        local_id4 = uuid.uuid4().hex
+        local_id5 = uuid.uuid4().hex
+
+        # Create five mappings, two in domainA, three in domainB
+        self.id_mapping_api.create_id_mapping(
+            {'domain_id': self.domainA['id'], 'local_id': local_id1,
+             'entity_type': mapping.EntityType.USER})
+        self.id_mapping_api.create_id_mapping(
+            {'domain_id': self.domainA['id'], 'local_id': local_id2,
+             'entity_type': mapping.EntityType.USER})
+        public_id3 = self.id_mapping_api.create_id_mapping(
+            {'domain_id': self.domainB['id'], 'local_id': local_id3,
+             'entity_type': mapping.EntityType.GROUP})
+        public_id4 = self.id_mapping_api.create_id_mapping(
+            {'domain_id': self.domainB['id'], 'local_id': local_id4,
+             'entity_type': mapping.EntityType.USER})
+        public_id5 = self.id_mapping_api.create_id_mapping(
+            {'domain_id': self.domainB['id'], 'local_id': local_id5,
+             'entity_type': mapping.EntityType.USER})
+
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings + 5))
+
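+        # purge_mappings() takes a filter dict: any combination of
+        # 'domain_id', 'local_id' and 'entity_type' narrows what gets
+        # deleted, and an empty dict matches everything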
+        # Purge mappings for domainA, should be left with those in B
+        self.id_mapping_api.purge_mappings(
+            {'domain_id': self.domainA['id']})
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings + 3))
+        self.id_mapping_api.get_id_mapping(public_id3)
+        self.id_mapping_api.get_id_mapping(public_id4)
+        self.id_mapping_api.get_id_mapping(public_id5)
+
+        # Purge mappings for type Group, should purge one more
+        self.id_mapping_api.purge_mappings(
+            {'entity_type': mapping.EntityType.GROUP})
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings + 2))
+        self.id_mapping_api.get_id_mapping(public_id4)
+        self.id_mapping_api.get_id_mapping(public_id5)
+
+        # Purge mapping for a specific local identifier
+        self.id_mapping_api.purge_mappings(
+            {'domain_id': self.domainB['id'], 'local_id': local_id4,
+             'entity_type': mapping.EntityType.USER})
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings + 1))
+        self.id_mapping_api.get_id_mapping(public_id5)
+
+        # Purge the remaining mappings
+        self.id_mapping_api.purge_mappings({})
+        self.assertThat(mapping_sql.list_id_mappings(),
+                        matchers.HasLength(initial_mappings))
diff --git a/keystone-moon/keystone/tests/unit/test_backend_kvs.py b/keystone-moon/keystone/tests/unit/test_backend_kvs.py
new file mode 100644 (file)
index 0000000..c0997ad
--- /dev/null
@@ -0,0 +1,172 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import datetime
+import uuid
+
+from oslo_config import cfg
+from oslo_utils import timeutils
+import six
+
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_backend
+
+
+CONF = cfg.CONF
+
+
+class KvsToken(tests.TestCase, test_backend.TokenTests):
+    def setUp(self):
+        super(KvsToken, self).setUp()
+        self.load_backends()
+
+    def test_flush_expired_token(self):
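+        # The KVS driver has no bulk flush of expired tokens; instead the
+        # per-user token index is pruned as new tokens are created (see
+        # test_cleanup_user_index_on_create below).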
+        self.assertRaises(
+            exception.NotImplemented,
+            self.token_provider_api._persistence.flush_expired_tokens)
+
+    def _update_user_token_index_direct(self, user_key, token_id, new_data):
+        persistence = self.token_provider_api._persistence
+        token_list = persistence.driver._get_user_token_list_with_expiry(
+            user_key)
+        # Update the user-index so that the expires time is _actually_
+        # expired. Since we do not do an explicit get on the token, we only
+        # reference the data in the user index (to save extra round-trips to
+        # the kvs backend).
+        for i, data in enumerate(token_list):
+            if data[0] == token_id:
+                token_list[i] = new_data
+                break
+        self.token_provider_api._persistence.driver._store.set(user_key,
+                                                               token_list)
+
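+    # NOTE: entries in the per-user token index are (token_id,
+    # isotime-formatted expiry) pairs; the helper above rewrites one
+    # entry in place, and expected_user_token_list in the test below is
+    # built with the same shape.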
+    def test_cleanup_user_index_on_create(self):
+        user_id = six.text_type(uuid.uuid4().hex)
+        valid_token_id, data = self.create_token_sample_data(user_id=user_id)
+        expired_token_id, expired_data = self.create_token_sample_data(
+            user_id=user_id)
+
+        expire_delta = datetime.timedelta(seconds=86400)
+
+        # NOTE(morganfainberg): Directly access the data cache since we need to
+        # get expired tokens as well as valid tokens.
+        token_persistence = self.token_provider_api._persistence
+        user_key = token_persistence.driver._prefix_user_id(user_id)
+        user_token_list = token_persistence.driver._store.get(user_key)
+        valid_token_ref = token_persistence.get_token(valid_token_id)
+        expired_token_ref = token_persistence.get_token(expired_token_id)
+        expected_user_token_list = [
+            (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
+                                               subsecond=True)),
+            (expired_token_id, timeutils.isotime(expired_token_ref['expires'],
+                                                 subsecond=True))]
+        self.assertEqual(expected_user_token_list, user_token_list)
+        new_expired_data = (expired_token_id,
+                            timeutils.isotime(
+                                (timeutils.utcnow() - expire_delta),
+                                subsecond=True))
+        self._update_user_token_index_direct(user_key, expired_token_id,
+                                             new_expired_data)
+        valid_token_id_2, valid_data_2 = self.create_token_sample_data(
+            user_id=user_id)
+        valid_token_ref_2 = token_persistence.get_token(valid_token_id_2)
+        expected_user_token_list = [
+            (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
+                                               subsecond=True)),
+            (valid_token_id_2, timeutils.isotime(valid_token_ref_2['expires'],
+                                                 subsecond=True))]
+        user_token_list = token_persistence.driver._store.get(user_key)
+        self.assertEqual(expected_user_token_list, user_token_list)
+
+        # Test that revoked tokens are removed from the list on create.
+        token_persistence.delete_token(valid_token_id_2)
+        new_token_id, data = self.create_token_sample_data(user_id=user_id)
+        new_token_ref = token_persistence.get_token(new_token_id)
+        expected_user_token_list = [
+            (valid_token_id, timeutils.isotime(valid_token_ref['expires'],
+                                               subsecond=True)),
+            (new_token_id, timeutils.isotime(new_token_ref['expires'],
+                                             subsecond=True))]
+        user_token_list = token_persistence.driver._store.get(user_key)
+        self.assertEqual(expected_user_token_list, user_token_list)
+
+
+class KvsCatalog(tests.TestCase, test_backend.CatalogTests):
+    def setUp(self):
+        super(KvsCatalog, self).setUp()
+        self.load_backends()
+        self._load_fake_catalog()
+
+    def config_overrides(self):
+        super(KvsCatalog, self).config_overrides()
+        self.config_fixture.config(
+            group='catalog',
+            driver='keystone.catalog.backends.kvs.Catalog')
+
+    def _load_fake_catalog(self):
+        self.catalog_foobar = self.catalog_api.driver._create_catalog(
+            'foo', 'bar',
+            {'RegionFoo': {'service_bar': {'foo': 'bar'}}})
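+        # The fake catalog is stored for user 'foo' and tenant 'bar'; the
+        # get_catalog tests below look it up with those same keys.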
+
+    def test_get_catalog_404(self):
+        # FIXME(dolph): this test should be moved up to test_backend
+        # FIXME(dolph): exceptions should be UserNotFound and ProjectNotFound
+        self.assertRaises(exception.NotFound,
+                          self.catalog_api.get_catalog,
+                          uuid.uuid4().hex,
+                          'bar')
+
+        self.assertRaises(exception.NotFound,
+                          self.catalog_api.get_catalog,
+                          'foo',
+                          uuid.uuid4().hex)
+
+    def test_get_catalog(self):
+        catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
+        self.assertDictEqual(catalog_ref, self.catalog_foobar)
+
+    def test_get_catalog_endpoint_disabled(self):
+        # This test doesn't apply to KVS because with the KVS backend the
+        # application creates the catalog (including the endpoints) for each
+        # user and project. Whether endpoints are enabled or disabled isn't
+        # a consideration.
+        f = super(KvsCatalog, self).test_get_catalog_endpoint_disabled
+        self.assertRaises(exception.NotFound, f)
+
+    def test_get_v3_catalog_endpoint_disabled(self):
+        # There's no need to have disabled endpoints in the kvs catalog. Those
+        # endpoints should just be removed from the store. This just tests
+        # what happens currently when the super impl is called.
+        f = super(KvsCatalog, self).test_get_v3_catalog_endpoint_disabled
+        self.assertRaises(exception.NotFound, f)
+
+    def test_list_regions_filtered_by_parent_region_id(self):
+        self.skipTest('KVS backend does not support hints')
+
+    def test_service_filtering(self):
+        self.skipTest("kvs backend doesn't support filtering")
+
+
+class KvsTokenCacheInvalidation(tests.TestCase,
+                                test_backend.TokenCacheInvalidation):
+    def setUp(self):
+        super(KvsTokenCacheInvalidation, self).setUp()
+        self.load_backends()
+        self._create_test_data()
+
+    def config_overrides(self):
+        super(KvsTokenCacheInvalidation, self).config_overrides()
+        self.config_fixture.config(
+            group='token',
+            driver='keystone.token.persistence.backends.kvs.Token')
diff --git a/keystone-moon/keystone/tests/unit/test_backend_ldap.py b/keystone-moon/keystone/tests/unit/test_backend_ldap.py
new file mode 100644 (file)
index 0000000..1011980
--- /dev/null
@@ -0,0 +1,3049 @@
+# -*- coding: utf-8 -*-
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+import ldap
+import mock
+from oslo_config import cfg
+from testtools import matchers
+
+from keystone.common import cache
+from keystone.common import ldap as common_ldap
+from keystone.common.ldap import core as common_ldap_core
+from keystone.common import sql
+from keystone import exception
+from keystone import identity
+from keystone.identity.mapping_backends import mapping as map
+from keystone import resource
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit import fakeldap
+from keystone.tests.unit import identity_mapping as mapping_sql
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit import test_backend
+
+
+CONF = cfg.CONF
+
+
+def create_group_container(identity_api):
+    # Create the groups base entry (ou=Groups,cn=example,cn=com)
+    group_api = identity_api.driver.group
+    conn = group_api.get_connection()
+    dn = 'ou=Groups,cn=example,cn=com'
+    conn.add_s(dn, [('objectclass', ['organizationalUnit']),
+                    ('ou', ['Groups'])])
+
+
+class BaseLDAPIdentity(test_backend.IdentityTests):
+
+    def setUp(self):
+        super(BaseLDAPIdentity, self).setUp()
+        self.clear_database()
+
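+        # Route ldap URLs that use the fake:// scheme to the in-memory
+        # FakeLdap shim so these tests never contact a real directory.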
+        common_ldap.register_handler('fake://', fakeldap.FakeLdap)
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        self.addCleanup(common_ldap_core._HANDLERS.clear)
+
+    def _get_domain_fixture(self):
+        """Domains in LDAP are read-only, so just return the static one."""
+        return self.resource_api.get_domain(CONF.identity.default_domain_id)
+
+    def clear_database(self):
+        for shelf in fakeldap.FakeShelves:
+            fakeldap.FakeShelves[shelf].clear()
+
+    def reload_backends(self, domain_id):
+        # Only one backend unless we are using separate domain backends
+        self.load_backends()
+
+    def get_config(self, domain_id):
+        # Only one conf structure unless we are using separate domain backends
+        return CONF
+
+    def config_overrides(self):
+        super(BaseLDAPIdentity, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+
+    def config_files(self):
+        config_files = super(BaseLDAPIdentity, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
+        return config_files
+
+    def get_user_enabled_vals(self, user):
+        user_dn = (
+            self.identity_api.driver.user._id_to_dn_string(user['id']))
+        enabled_attr_name = CONF.ldap.user_enabled_attribute
+
+        ldap_ = self.identity_api.driver.user.get_connection()
+        res = ldap_.search_s(user_dn,
+                             ldap.SCOPE_BASE,
+                             u'(sn=%s)' % user['name'])
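+        # search_s returns a list of (dn, attrs) tuples, so res[0][1] is
+        # the attribute dictionary of the single entry matched at
+        # SCOPE_BASE.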
+        if enabled_attr_name in res[0][1]:
+            return res[0][1][enabled_attr_name]
+        else:
+            return None
+
+    def test_build_tree(self):
+        """Regression test for building the tree names
+        """
+        user_api = identity.backends.ldap.UserApi(CONF)
+        self.assertTrue(user_api)
+        self.assertEqual("ou=Users,%s" % CONF.ldap.suffix, user_api.tree_dn)
+
+    def test_configurable_allowed_user_actions(self):
+        user = {'name': u'fäké1',
+                'password': u'fäképass1',
+                'domain_id': CONF.identity.default_domain_id,
+                'tenants': ['bar']}
+        user = self.identity_api.create_user(user)
+        self.identity_api.get_user(user['id'])
+
+        user['password'] = u'fäképass2'
+        self.identity_api.update_user(user['id'], user)
+
+        self.identity_api.delete_user(user['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          user['id'])
+
+    def test_configurable_forbidden_user_actions(self):
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_allow_create = False
+        conf.ldap.user_allow_update = False
+        conf.ldap.user_allow_delete = False
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        user = {'name': u'fäké1',
+                'password': u'fäképass1',
+                'domain_id': CONF.identity.default_domain_id,
+                'tenants': ['bar']}
+        self.assertRaises(exception.ForbiddenAction,
+                          self.identity_api.create_user,
+                          user)
+
+        self.user_foo['password'] = u'fäképass2'
+        self.assertRaises(exception.ForbiddenAction,
+                          self.identity_api.update_user,
+                          self.user_foo['id'],
+                          self.user_foo)
+
+        self.assertRaises(exception.ForbiddenAction,
+                          self.identity_api.delete_user,
+                          self.user_foo['id'])
+
+    def test_configurable_forbidden_create_existing_user(self):
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_allow_create = False
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        self.assertRaises(exception.ForbiddenAction,
+                          self.identity_api.create_user,
+                          self.user_foo)
+
+    def test_user_filter(self):
+        user_ref = self.identity_api.get_user(self.user_foo['id'])
+        self.user_foo.pop('password')
+        self.assertDictEqual(user_ref, self.user_foo)
+
+        conf = self.get_config(user_ref['domain_id'])
+        conf.ldap.user_filter = '(CN=DOES_NOT_MATCH)'
+        self.reload_backends(user_ref['domain_id'])
+        # invalidate the cache if the result is cached.
+        self.identity_api.get_user.invalidate(self.identity_api,
+                                              self.user_foo['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          self.user_foo['id'])
+
+    def test_remove_role_grant_from_user_and_project(self):
+        self.assignment_api.create_grant(user_id=self.user_foo['id'],
+                                         project_id=self.tenant_baz['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            user_id=self.user_foo['id'],
+            project_id=self.tenant_baz['id'])
+        self.assertDictEqual(roles_ref[0], self.role_member)
+
+        self.assignment_api.delete_grant(user_id=self.user_foo['id'],
+                                         project_id=self.tenant_baz['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            user_id=self.user_foo['id'],
+            project_id=self.tenant_baz['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          user_id=self.user_foo['id'],
+                          project_id=self.tenant_baz['id'],
+                          role_id='member')
+
+    def test_get_and_remove_role_grant_by_group_and_project(self):
+        new_domain = self._get_domain_fixture()
+        new_group = {'domain_id': new_domain['id'],
+                     'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_user = {'name': 'new_user', 'enabled': True,
+                    'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            project_id=self.tenant_bar['id'])
+        self.assertEqual([], roles_ref)
+        self.assertEqual(0, len(roles_ref))
+
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            project_id=self.tenant_bar['id'])
+        self.assertNotEmpty(roles_ref)
+        self.assertDictEqual(roles_ref[0], self.role_member)
+
+        self.assignment_api.delete_grant(group_id=new_group['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            project_id=self.tenant_bar['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.RoleAssignmentNotFound,
+                          self.assignment_api.delete_grant,
+                          group_id=new_group['id'],
+                          project_id=self.tenant_bar['id'],
+                          role_id='member')
+
+    def test_get_and_remove_role_grant_by_group_and_domain(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_get_role_assignment_by_domain_not_found(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_del_role_assignment_by_domain_not_found(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_get_and_remove_role_grant_by_user_and_domain(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_get_and_remove_correct_role_grant_from_a_mix(self):
+        self.skipTest('Blocked by bug 1101287')
+
+    def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_role_grant_by_group_and_cross_domain_project(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_role_grant_by_user_and_cross_domain_project(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_multi_role_grant_by_user_group_on_project_domain(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_delete_role_with_user_and_group_grants(self):
+        self.skipTest('Blocked by bug 1101287')
+
+    def test_delete_user_with_group_project_domain_links(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_delete_group_with_user_project_domain_links(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_list_projects_for_user(self):
+        domain = self._get_domain_fixture()
+        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                 'domain_id': domain['id'], 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertThat(user_projects, matchers.HasLength(0))
+
+        # new grant(user1, role_member, tenant_bar)
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+        # new grant(user1, role_member, tenant_baz)
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=self.tenant_baz['id'],
+                                         role_id=self.role_member['id'])
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertThat(user_projects, matchers.HasLength(2))
+
+        # Now, check the number of projects granted through groups
+        user2 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                 'domain_id': domain['id'], 'enabled': True}
+        user2 = self.identity_api.create_user(user2)
+
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group1 = self.identity_api.create_group(group1)
+
+        self.identity_api.add_user_to_group(user2['id'], group1['id'])
+
+        # new grant(group1(user2), role_member, tenant_bar)
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+        # new grant(group1(user2), role_member, tenant_baz)
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=self.tenant_baz['id'],
+                                         role_id=self.role_member['id'])
+        user_projects = self.assignment_api.list_projects_for_user(user2['id'])
+        self.assertThat(user_projects, matchers.HasLength(2))
+
+        # new grant(group1(user2), role_other, tenant_bar)
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_other['id'])
+        user_projects = self.assignment_api.list_projects_for_user(user2['id'])
+        self.assertThat(user_projects, matchers.HasLength(2))
+
+    def test_list_projects_for_user_and_groups(self):
+        domain = self._get_domain_fixture()
+        # Create user1
+        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                 'domain_id': domain['id'], 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+
+        # Create new group for user1
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group1 = self.identity_api.create_group(group1)
+
+        # Add user1 to group1
+        self.identity_api.add_user_to_group(user1['id'], group1['id'])
+
+        # Now, add grant to user1 and group1 in tenant_bar
+        self.assignment_api.create_grant(user_id=user1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+
+        # The result is that user1 has only one project granted
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertThat(user_projects, matchers.HasLength(1))
+
+        # Now, delete user1 grant into tenant_bar and check
+        self.assignment_api.delete_grant(user_id=user1['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+
+        # The result is that user1 still has only one project granted,
+        # via group1.
+        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+        self.assertThat(user_projects, matchers.HasLength(1))
+
+    def test_list_projects_for_user_with_grants(self):
+        domain = self._get_domain_fixture()
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group1 = self.identity_api.create_group(group1)
+        group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group2 = self.identity_api.create_group(group2)
+
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project2['id'], project2)
+
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            group1['id'])
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            group2['id'])
+
+        self.assignment_api.create_grant(user_id=new_user['id'],
+                                         project_id=self.tenant_bar['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(user_id=new_user['id'],
+                                         project_id=project1['id'],
+                                         role_id=self.role_admin['id'])
+        self.assignment_api.create_grant(group_id=group2['id'],
+                                         project_id=project2['id'],
+                                         role_id=self.role_admin['id'])
+
+        user_projects = self.assignment_api.list_projects_for_user(
+            new_user['id'])
+        self.assertEqual(3, len(user_projects))
+
+    def test_create_duplicate_user_name_in_different_domains(self):
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_create_duplicate_project_name_in_different_domains(self):
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_create_duplicate_group_name_in_different_domains(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_move_user_between_domains(self):
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_move_user_between_domains_with_clashing_names_fails(self):
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_move_group_between_domains(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_move_group_between_domains_with_clashing_names_fails(self):
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_move_project_between_domains(self):
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_move_project_between_domains_with_clashing_names_fails(self):
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_get_roles_for_user_and_domain(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_get_roles_for_groups_on_domain(self):
+        self.skipTest('Blocked by bug: 1390125')
+
+    def test_get_roles_for_groups_on_project(self):
+        self.skipTest('Blocked by bug: 1390125')
+
+    def test_list_domains_for_groups(self):
+        self.skipTest('N/A: LDAP does not support multiple domains')
+
+    def test_list_projects_for_groups(self):
+        self.skipTest('Blocked by bug: 1390125')
+
+    def test_domain_delete_hierarchy(self):
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_list_role_assignments_unfiltered(self):
+        new_domain = self._get_domain_fixture()
+        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_project = {'id': uuid.uuid4().hex,
+                       'name': uuid.uuid4().hex,
+                       'domain_id': new_domain['id']}
+        self.resource_api.create_project(new_project['id'], new_project)
+
+        # First check how many role grants already exist
+        existing_assignments = len(self.assignment_api.list_role_assignments())
+
+        self.assignment_api.create_grant(user_id=new_user['id'],
+                                         project_id=new_project['id'],
+                                         role_id='other')
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         project_id=new_project['id'],
+                                         role_id='admin')
+
+        # Read back the list of assignments - check it has gone up by 2
+        after_assignments = len(self.assignment_api.list_role_assignments())
+        self.assertEqual(existing_assignments + 2, after_assignments)
+
+    def test_list_role_assignments_dumb_member(self):
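+        # use_dumb_member makes the driver add a placeholder member
+        # (CONF.ldap.dumb_member) to group-like entries so that LDAP
+        # schemas requiring a non-empty member attribute remain valid;
+        # the placeholder must never surface in API results.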
+        self.config_fixture.config(group='ldap', use_dumb_member=True)
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        new_domain = self._get_domain_fixture()
+        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        new_project = {'id': uuid.uuid4().hex,
+                       'name': uuid.uuid4().hex,
+                       'domain_id': new_domain['id']}
+        self.resource_api.create_project(new_project['id'], new_project)
+        self.assignment_api.create_grant(user_id=new_user['id'],
+                                         project_id=new_project['id'],
+                                         role_id='other')
+
+        # Read back the list of assignments and ensure
+        # that the LDAP dumb member isn't listed.
+        assignment_ids = [a['user_id'] for a in
+                          self.assignment_api.list_role_assignments()]
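+        # dumb_member is configured as a full DN; _dn_to_id reduces it to
+        # the bare ID form that appears in assignment records.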
+        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
+        self.assertNotIn(dumb_id, assignment_ids)
+
+    def test_list_user_ids_for_project_dumb_member(self):
+        self.config_fixture.config(group='ldap', use_dumb_member=True)
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                'enabled': True, 'domain_id': test_backend.DEFAULT_DOMAIN_ID}
+
+        user = self.identity_api.create_user(user)
+        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+                                                user['id'])
+        user_ids = self.assignment_api.list_user_ids_for_project(
+            self.tenant_baz['id'])
+
+        self.assertIn(user['id'], user_ids)
+
+        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
+        self.assertNotIn(dumb_id, user_ids)
+
+    def test_multi_group_grants_on_project_domain(self):
+        self.skipTest('Blocked by bug 1101287')
+
+    def test_list_group_members_missing_entry(self):
+        """List group members with deleted user.
+
+        If a group has a deleted entry for a member, the non-deleted members
+        are returned.
+
+        """
+
+        # Create a group
+        group = dict(name=uuid.uuid4().hex,
+                     domain_id=CONF.identity.default_domain_id)
+        group_id = self.identity_api.create_group(group)['id']
+
+        # Create a couple of users and add them to the group.
+        user = dict(name=uuid.uuid4().hex,
+                    domain_id=CONF.identity.default_domain_id)
+        user_1_id = self.identity_api.create_user(user)['id']
+
+        self.identity_api.add_user_to_group(user_1_id, group_id)
+
+        user = dict(name=uuid.uuid4().hex,
+                    domain_id=CONF.identity.default_domain_id)
+        user_2_id = self.identity_api.create_user(user)['id']
+
+        self.identity_api.add_user_to_group(user_2_id, group_id)
+
+        # Delete user 2
+        # NOTE(blk-u): need to go directly to user interface to keep from
+        # updating the group.
+        unused, driver, entity_id = (
+            self.identity_api._get_domain_driver_and_entity_id(user_2_id))
+        driver.user.delete(entity_id)
+
+        # List group users and verify only user 1.
+        res = self.identity_api.list_users_in_group(group_id)
+
+        self.assertEqual(1, len(res), "Expected 1 entry (user_1)")
+        self.assertEqual(user_1_id, res[0]['id'], "Expected user 1 id")
+
+    def test_list_group_members_when_no_members(self):
+        # List group members when there is no member in the group.
+        # No exception should be raised.
+        group = {
+            'domain_id': CONF.identity.default_domain_id,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex}
+        group = self.identity_api.create_group(group)
+
+        # If this doesn't raise, then the test is successful.
+        self.identity_api.list_users_in_group(group['id'])
+
+    def test_list_group_members_dumb_member(self):
+        self.config_fixture.config(group='ldap', use_dumb_member=True)
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        # Create a group
+        group = dict(name=uuid.uuid4().hex,
+                     domain_id=CONF.identity.default_domain_id)
+        group_id = self.identity_api.create_group(group)['id']
+
+        # Create a user
+        user = dict(name=uuid.uuid4().hex,
+                    domain_id=CONF.identity.default_domain_id)
+        user_id = self.identity_api.create_user(user)['id']
+
+        # Add user to the group
+        self.identity_api.add_user_to_group(user_id, group_id)
+
+        user_ids = self.identity_api.list_users_in_group(group_id)
+        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
+
+        self.assertNotIn(dumb_id, user_ids)
+
+    def test_list_domains(self):
+        domains = self.resource_api.list_domains()
+        self.assertEqual(
+            [resource.calc_default_domain()],
+            domains)
+
+    def test_list_domains_non_default_domain_id(self):
+        # If the default_domain_id is changed, the ID of the default domain
+        # returned by list_domains is the new default_domain_id.
+
+        new_domain_id = uuid.uuid4().hex
+        self.config_fixture.config(group='identity',
+                                   default_domain_id=new_domain_id)
+
+        domains = self.resource_api.list_domains()
+
+        self.assertEqual(new_domain_id, domains[0]['id'])
+
+    def test_authenticate_requires_simple_bind(self):
+        user = {
+            'name': 'NO_META',
+            'domain_id': test_backend.DEFAULT_DOMAIN_ID,
+            'password': 'no_meta2',
+            'enabled': True,
+        }
+        user = self.identity_api.create_user(user)
+        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+                                                user['id'])
+        driver = self.identity_api._select_identity_driver(
+            user['domain_id'])
+        driver.user.LDAP_USER = None
+        driver.user.LDAP_PASSWORD = None
+
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=user['id'],
+                          password=None)
+
+    # (spzala) The group and domain crud tests below override the standard ones
+    # in test_backend.py so that we can exclude the update name test, since we
+    # do not yet support the update of either group or domain names with LDAP.
+    # In the tests below, the update is demonstrated by updating description.
+    # Refer to bug 1136403 for more detail.
+    def test_group_crud(self):
+        group = {
+            'domain_id': CONF.identity.default_domain_id,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex}
+        group = self.identity_api.create_group(group)
+        group_ref = self.identity_api.get_group(group['id'])
+        self.assertDictEqual(group_ref, group)
+        group['description'] = uuid.uuid4().hex
+        self.identity_api.update_group(group['id'], group)
+        group_ref = self.identity_api.get_group(group['id'])
+        self.assertDictEqual(group_ref, group)
+
+        self.identity_api.delete_group(group['id'])
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.get_group,
+                          group['id'])
+
+    @tests.skip_if_cache_disabled('identity')
+    def test_cache_layer_group_crud(self):
+        group = {
+            'domain_id': CONF.identity.default_domain_id,
+            'name': uuid.uuid4().hex}
+        group = self.identity_api.create_group(group)
+        # cache the result
+        group_ref = self.identity_api.get_group(group['id'])
+        # delete the group bypassing identity api.
+        domain_id, driver, entity_id = (
+            self.identity_api._get_domain_driver_and_entity_id(group['id']))
+        driver.delete_group(entity_id)
+
+        self.assertEqual(group_ref,
+                         self.identity_api.get_group(group['id']))
+        self.identity_api.get_group.invalidate(self.identity_api, group['id'])
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.get_group, group['id'])
+
+        group = {
+            'domain_id': CONF.identity.default_domain_id,
+            'name': uuid.uuid4().hex}
+        group = self.identity_api.create_group(group)
+        # cache the result
+        self.identity_api.get_group(group['id'])
+        group['description'] = uuid.uuid4().hex
+        group_ref = self.identity_api.update_group(group['id'], group)
+        self.assertDictContainsSubset(self.identity_api.get_group(group['id']),
+                                      group_ref)
+
+    def test_create_user_none_mapping(self):
+        # When creating a user where an attribute maps to None, the entry is
+        # created without that attribute and it doesn't fail with a TypeError.
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_attribute_ignore = ['enabled', 'email',
+                                           'tenants', 'tenantId']
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        user = {'name': u'fäké1',
+                'password': u'fäképass1',
+                'domain_id': CONF.identity.default_domain_id,
+                'default_project_id': 'maps_to_none',
+                }
+
+        # If this doesn't raise, then the test is successful.
+        user = self.identity_api.create_user(user)
+
+    def test_create_user_with_boolean_string_names(self):
+        # Ensure that any attribute that is equal to the string 'TRUE'
+        # or 'FALSE' will not be converted to a boolean value; it
+        # should be returned as is.
+        boolean_strings = ['TRUE', 'FALSE', 'true', 'false', 'True', 'False',
+                           'TrUe', 'FaLse']
+        for name in boolean_strings:
+            user = {
+                'name': name,
+                'domain_id': CONF.identity.default_domain_id}
+            user_ref = self.identity_api.create_user(user)
+            user_info = self.identity_api.get_user(user_ref['id'])
+            self.assertEqual(name, user_info['name'])
+            # Delete the user to ensure that the Keystone uniqueness
+            # requirements combined with the case-insensitive nature of a
+            # typical LDAP schema do not cause subsequent names in
+            # boolean_strings to clash.
+            self.identity_api.delete_user(user_ref['id'])
+
+    def test_unignored_user_none_mapping(self):
+        # Ensure that an attribute mapping to None that is not explicitly
+        # ignored in the configuration is implicitly ignored without
+        # triggering an error.
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_attribute_ignore = ['enabled', 'email',
+                                           'tenants', 'tenantId']
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        user = {'name': u'fäké1',
+                'password': u'fäképass1',
+                'domain_id': CONF.identity.default_domain_id,
+                }
+
+        user_ref = self.identity_api.create_user(user)
+
+        # If this doesn't raise, then the test is successful.
+        self.identity_api.get_user(user_ref['id'])
+
+    def test_update_user_name(self):
+        """A user's name cannot be changed through the LDAP driver."""
+        self.assertRaises(exception.Conflict,
+                          super(BaseLDAPIdentity, self).test_update_user_name)
+
+    def test_arbitrary_attributes_are_returned_from_get_user(self):
+        self.skipTest("Using arbitrary attributes doesn't work under LDAP")
+
+    def test_new_arbitrary_attributes_are_returned_from_update_user(self):
+        self.skipTest("Using arbitrary attributes doesn't work under LDAP")
+
+    def test_updated_arbitrary_attributes_are_returned_from_update_user(self):
+        self.skipTest("Using arbitrary attributes doesn't work under LDAP")
+
+    def test_cache_layer_domain_crud(self):
+        # TODO(morganfainberg): This also needs to be removed when full LDAP
+        # implementation is submitted.  No need to duplicate the above test,
+        # just skip this time.
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_user_id_comma(self):
+        """Even if the user has a , in their ID, groups can be listed."""
+
+        # Create a user with a , in their ID
+        # NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
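+        # A comma is the RDN separator inside a DN (RFC 4514), so an ID
+        # containing one has to be escaped when embedded in a DN; that is
+        # what makes this ID worth a regression test.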
+
+        # Since we want to fake up this special ID, we'll squirt it
+        # directly into the driver and bypass the manager layer.
+        user_id = u'Doe, John'
+        user = {
+            'id': user_id,
+            'name': self.getUniqueString(),
+            'password': self.getUniqueString(),
+            'domain_id': CONF.identity.default_domain_id,
+        }
+        user = self.identity_api.driver.create_user(user_id, user)
+
+        # Now we'll use the manager to discover it, which will create a
+        # Public ID for it.
+        ref_list = self.identity_api.list_users()
+        public_user_id = None
+        for ref in ref_list:
+            if ref['name'] == user['name']:
+                public_user_id = ref['id']
+                break
+
+        # Create a group
+        group_id = uuid.uuid4().hex
+        group = {
+            'id': group_id,
+            'name': self.getUniqueString(prefix='tuidc'),
+            'description': self.getUniqueString(),
+            'domain_id': CONF.identity.default_domain_id,
+        }
+        group = self.identity_api.driver.create_group(group_id, group)
+        # Now we'll use the manager to discover it, which will create a
+        # Public ID for it.
+        ref_list = self.identity_api.list_groups()
+        public_group_id = None
+        for ref in ref_list:
+            if ref['name'] == group['name']:
+                public_group_id = ref['id']
+                break
+
+        # Put the user in the group
+        self.identity_api.add_user_to_group(public_user_id, public_group_id)
+
+        # List groups for user.
+        ref_list = self.identity_api.list_groups_for_user(public_user_id)
+
+        group['id'] = public_group_id
+        self.assertThat(ref_list, matchers.Equals([group]))
+
+    def test_user_id_comma_grants(self):
+        """Even if the user has a , in their ID, can get user and group grants.
+        """
+
+        # Create a user with a , in their ID
+        # NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
+
+        # Since we want to fake up this special ID, we'll squirt it
+        # directly into the driver and bypass the manager layer.
+        user_id = u'Doe, John'
+        user = {
+            'id': user_id,
+            'name': self.getUniqueString(),
+            'password': self.getUniqueString(),
+            'domain_id': CONF.identity.default_domain_id,
+        }
+        self.identity_api.driver.create_user(user_id, user)
+
+        # Now we'll use the manager to discover it, which will create a
+        # Public ID for it.
+        ref_list = self.identity_api.list_users()
+        public_user_id = None
+        for ref in ref_list:
+            if ref['name'] == user['name']:
+                public_user_id = ref['id']
+                break
+
+        # Grant the user a role on a project.
+
+        role_id = 'member'
+        project_id = self.tenant_baz['id']
+
+        self.assignment_api.create_grant(role_id, user_id=public_user_id,
+                                         project_id=project_id)
+
+        role_ref = self.assignment_api.get_grant(role_id,
+                                                 user_id=public_user_id,
+                                                 project_id=project_id)
+
+        self.assertEqual(role_id, role_ref['id'])
+
+    def test_user_enabled_ignored_disable_error(self):
+        # When the server is configured so that the enabled attribute is
+        # ignored for users, users cannot be disabled.
+
+        self.config_fixture.config(group='ldap',
+                                   user_attribute_ignore=['enabled'])
+
+        # Need to re-load backends for the config change to take effect.
+        self.load_backends()
+
+        # Attempt to disable the user.
+        self.assertRaises(exception.ForbiddenAction,
+                          self.identity_api.update_user, self.user_foo['id'],
+                          {'enabled': False})
+
+        user_info = self.identity_api.get_user(self.user_foo['id'])
+
+        # If 'enabled' is ignored then 'enabled' isn't returned as part of the
+        # ref.
+        self.assertNotIn('enabled', user_info)
+
+    def test_group_enabled_ignored_disable_error(self):
+        # When the server is configured so that the enabled attribute is
+        # ignored for groups, groups cannot be disabled.
+
+        self.config_fixture.config(group='ldap',
+                                   group_attribute_ignore=['enabled'])
+
+        # Need to re-load backends for the config change to take effect.
+        self.load_backends()
+
+        # There's no group fixture so create a group.
+        new_domain = self._get_domain_fixture()
+        new_group = {'domain_id': new_domain['id'],
+                     'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+
+        # Attempt to disable the group.
+        self.assertRaises(exception.ForbiddenAction,
+                          self.identity_api.update_group, new_group['id'],
+                          {'enabled': False})
+
+        group_info = self.identity_api.get_group(new_group['id'])
+
+        # If 'enabled' is ignored then 'enabled' isn't returned as part of the
+        # ref.
+        self.assertNotIn('enabled', group_info)
+
+    def test_project_enabled_ignored_disable_error(self):
+        # When the server is configured so that the enabled attribute is
+        # ignored for projects, projects cannot be disabled.
+
+        self.config_fixture.config(group='ldap',
+                                   project_attribute_ignore=['enabled'])
+
+        # Need to re-load backends for the config change to take effect.
+        self.load_backends()
+
+        # Attempt to disable the project.
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.update_project,
+                          self.tenant_baz['id'], {'enabled': False})
+
+        project_info = self.resource_api.get_project(self.tenant_baz['id'])
+
+        # Unlike other entities, if 'enabled' is ignored then 'enabled' is
+        # returned as part of the ref.
+        self.assertIs(True, project_info['enabled'])
+
+
+class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
+
+    def setUp(self):
+        # NOTE(dstanek): The database must be set up prior to calling the
+        # parent's setUp. The parent's setUp uses services (like
+        # credentials) that require a database.
+        self.useFixture(database.Database())
+        super(LDAPIdentity, self).setUp()
+
+    def load_fixtures(self, fixtures):
+        # Override the super impl since we need to create the group container.
+        create_group_container(self.identity_api)
+        super(LDAPIdentity, self).load_fixtures(fixtures)
+
+    def test_configurable_allowed_project_actions(self):
+        tenant = {'id': u'fäké1', 'name': u'fäké1', 'enabled': True}
+        self.resource_api.create_project(u'fäké1', tenant)
+        tenant_ref = self.resource_api.get_project(u'fäké1')
+        self.assertEqual(u'fäké1', tenant_ref['id'])
+
+        tenant['enabled'] = False
+        self.resource_api.update_project(u'fäké1', tenant)
+
+        self.resource_api.delete_project(u'fäké1')
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          u'fäké1')
+
+    def test_configurable_subtree_delete(self):
+        self.config_fixture.config(group='ldap', allow_subtree_delete=True)
+        self.load_backends()
+
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': CONF.identity.default_domain_id}
+        self.resource_api.create_project(project1['id'], project1)
+
+        role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role1['id'], role1)
+
+        user1 = {'name': uuid.uuid4().hex,
+                 'domain_id': CONF.identity.default_domain_id,
+                 'password': uuid.uuid4().hex,
+                 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+
+        self.assignment_api.add_role_to_user_and_project(
+            user_id=user1['id'],
+            tenant_id=project1['id'],
+            role_id=role1['id'])
+
+        self.resource_api.delete_project(project1['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          project1['id'])
+
+        self.resource_api.create_project(project1['id'], project1)
+
+        roles = self.assignment_api.get_roles_for_user_and_project(
+            user1['id'],
+            project1['id'])
+        self.assertEqual(0, len(roles))
+
+    def test_configurable_forbidden_project_actions(self):
+        self.config_fixture.config(
+            group='ldap', project_allow_create=False,
+            project_allow_update=False, project_allow_delete=False)
+        self.load_backends()
+
+        tenant = {'id': u'fäké1', 'name': u'fäké1'}
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.create_project,
+                          u'fäké1',
+                          tenant)
+
+        self.tenant_bar['enabled'] = False
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.update_project,
+                          self.tenant_bar['id'],
+                          self.tenant_bar)
+        self.assertRaises(exception.ForbiddenAction,
+                          self.resource_api.delete_project,
+                          self.tenant_bar['id'])
+
+    def test_project_filter(self):
+        tenant_ref = self.resource_api.get_project(self.tenant_bar['id'])
+        self.assertDictEqual(tenant_ref, self.tenant_bar)
+
+        self.config_fixture.config(group='ldap',
+                                   project_filter='(CN=DOES_NOT_MATCH)')
+        self.load_backends()
+        # NOTE(morganfainberg): CONF.ldap.project_filter will not be
+        # dynamically changed at runtime. This invalidate is a work-around for
+        # the expectation that it is safe to change config values in tests that
+        # could affect what the drivers would return up to the manager.  This
+        # solves this assumption when working with aggressive (on-create)
+        # cache population.
+        self.role_api.get_role.invalidate(self.role_api,
+                                          self.role_member['id'])
+        self.role_api.get_role(self.role_member['id'])
+        self.resource_api.get_project.invalidate(self.resource_api,
+                                                 self.tenant_bar['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          self.tenant_bar['id'])
+
+    def test_dumb_member(self):
+        self.config_fixture.config(group='ldap', use_dumb_member=True)
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          dumb_id)
+
+    def test_project_attribute_mapping(self):
+        self.config_fixture.config(
+            group='ldap', project_name_attribute='ou',
+            project_desc_attribute='description',
+            project_enabled_attribute='enabled')
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        # NOTE(morganfainberg): CONF.ldap.project_name_attribute,
+        # CONF.ldap.project_desc_attribute, and
+        # CONF.ldap.project_enabled_attribute will not be
+        # dynamically changed at runtime. This invalidate is a work-around for
+        # the expectation that it is safe to change config values in tests that
+        # could affect what the drivers would return up to the manager.  This
+        # solves this assumption when working with aggressive (on-create)
+        # cache population.
+        self.resource_api.get_project.invalidate(self.resource_api,
+                                                 self.tenant_baz['id'])
+        tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
+        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
+        self.assertEqual(self.tenant_baz['name'], tenant_ref['name'])
+        self.assertEqual(
+            self.tenant_baz['description'],
+            tenant_ref['description'])
+        self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
+
+        self.config_fixture.config(group='ldap',
+                                   project_name_attribute='description',
+                                   project_desc_attribute='ou')
+        self.load_backends()
+        # NOTE(morganfainberg): CONF.ldap.project_name_attribute,
+        # CONF.ldap.project_desc_attribute, and
+        # CONF.ldap.project_enabled_attribute will not be
+        # dynamically changed at runtime. This invalidate is a work-around for
+        # the expectation that it is safe to change config values in tests that
+        # could affect what the drivers would return up to the manager.  This
+        # solves this assumption when working with aggressive (on-create)
+        # cache population.
+        self.resource_api.get_project.invalidate(self.resource_api,
+                                                 self.tenant_baz['id'])
+        tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
+        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
+        self.assertEqual(self.tenant_baz['description'], tenant_ref['name'])
+        self.assertEqual(self.tenant_baz['name'], tenant_ref['description'])
+        self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
+
+    def test_project_attribute_ignore(self):
+        self.config_fixture.config(
+            group='ldap',
+            project_attribute_ignore=['name', 'description', 'enabled'])
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        # NOTE(morganfainberg): CONF.ldap.project_attribute_ignore will not be
+        # dynamically changed at runtime. This invalidate is a work-around for
+        # the expectation that it is safe to change config values in tests
+        # that could affect what the drivers would return up to the manager.
+        # This solves this assumption when working with aggressive (on-create)
+        # cache population.
+        self.resource_api.get_project.invalidate(self.resource_api,
+                                                 self.tenant_baz['id'])
+        tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
+        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
+        self.assertNotIn('name', tenant_ref)
+        self.assertNotIn('description', tenant_ref)
+        self.assertNotIn('enabled', tenant_ref)
+
+    def test_user_enable_attribute_mask(self):
+        self.config_fixture.config(group='ldap', user_enabled_mask=2,
+                                   user_enabled_default='512')
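+        # With user_enabled_mask=2, the enabled state is tracked by a single
+        # bit of the stored attribute: an enabled user stores the default
+        # value (512), while disabling the user ORs in the mask, giving
+        # 512 | 2 = 514.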
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        user = {'name': u'fäké1', 'enabled': True,
+                'domain_id': CONF.identity.default_domain_id}
+
+        user_ref = self.identity_api.create_user(user)
+
+        # Use assertIs rather than assertTrue because assertIs will assert the
+        # value is a Boolean as expected.
+        self.assertIs(user_ref['enabled'], True)
+        self.assertNotIn('enabled_nomask', user_ref)
+
+        enabled_vals = self.get_user_enabled_vals(user_ref)
+        self.assertEqual([512], enabled_vals)
+
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(user_ref['enabled'], True)
+        self.assertNotIn('enabled_nomask', user_ref)
+
+        user['enabled'] = False
+        user_ref = self.identity_api.update_user(user_ref['id'], user)
+        self.assertIs(user_ref['enabled'], False)
+        self.assertNotIn('enabled_nomask', user_ref)
+
+        enabled_vals = self.get_user_enabled_vals(user_ref)
+        self.assertEqual([514], enabled_vals)
+
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(user_ref['enabled'], False)
+        self.assertNotIn('enabled_nomask', user_ref)
+
+        user['enabled'] = True
+        user_ref = self.identity_api.update_user(user_ref['id'], user)
+        self.assertIs(user_ref['enabled'], True)
+        self.assertNotIn('enabled_nomask', user_ref)
+
+        enabled_vals = self.get_user_enabled_vals(user_ref)
+        self.assertEqual([512], enabled_vals)
+
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(user_ref['enabled'], True)
+        self.assertNotIn('enabled_nomask', user_ref)
+
+    def test_user_enabled_invert(self):
+        self.config_fixture.config(group='ldap', user_enabled_invert=True,
+                                   user_enabled_default=False)
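+        # With user_enabled_invert=True the stored LDAP value is the logical
+        # negation of the user's enabled state, and user_enabled_default
+        # refers to the stored (inverted) value, so a user created without
+        # an explicit flag ends up enabled.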
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        user1 = {'name': u'fäké1', 'enabled': True,
+                 'domain_id': CONF.identity.default_domain_id}
+
+        user2 = {'name': u'fäké2', 'enabled': False,
+                 'domain_id': CONF.identity.default_domain_id}
+
+        user3 = {'name': u'fäké3',
+                 'domain_id': CONF.identity.default_domain_id}
+
+        # Ensure that the LDAP attribute is False for a newly created
+        # enabled user.
+        user_ref = self.identity_api.create_user(user1)
+        self.assertIs(True, user_ref['enabled'])
+        enabled_vals = self.get_user_enabled_vals(user_ref)
+        self.assertEqual([False], enabled_vals)
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(True, user_ref['enabled'])
+
+        # Ensure that the LDAP attribute is True for a disabled user.
+        user1['enabled'] = False
+        user_ref = self.identity_api.update_user(user_ref['id'], user1)
+        self.assertIs(False, user_ref['enabled'])
+        enabled_vals = self.get_user_enabled_vals(user_ref)
+        self.assertEqual([True], enabled_vals)
+
+        # Enable the user and ensure that the LDAP attribute is True again.
+        user1['enabled'] = True
+        user_ref = self.identity_api.update_user(user_ref['id'], user1)
+        self.assertIs(True, user_ref['enabled'])
+        enabled_vals = self.get_user_enabled_vals(user_ref)
+        self.assertEqual([False], enabled_vals)
+
+        # Ensure that the LDAP attribute is True for a newly created
+        # disabled user.
+        user_ref = self.identity_api.create_user(user2)
+        self.assertIs(False, user_ref['enabled'])
+        enabled_vals = self.get_user_enabled_vals(user_ref)
+        self.assertEqual([True], enabled_vals)
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(False, user_ref['enabled'])
+
+        # Ensure that the LDAP attribute is inverted for a newly created
+        # user when the user_enabled_default setting is used.
+        user_ref = self.identity_api.create_user(user3)
+        self.assertIs(True, user_ref['enabled'])
+        enabled_vals = self.get_user_enabled_vals(user_ref)
+        self.assertEqual([False], enabled_vals)
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(True, user_ref['enabled'])
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_user_enabled_invert_no_enabled_value(self, mock_ldap_get):
+        self.config_fixture.config(group='ldap', user_enabled_invert=True,
+                                   user_enabled_default=False)
+        # Mock the search results to return an entry with
+        # no enabled value.
+        mock_ldap_get.return_value = (
+            'cn=junk,dc=example,dc=com',
+            {
+                'sn': [uuid.uuid4().hex],
+                'email': [uuid.uuid4().hex],
+                'cn': ['junk']
+            }
+        )
+
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_ref = user_api.get('junk')
+        # Ensure that the model enabled attribute is inverted
+        # from the resource default.
+        self.assertIs(not CONF.ldap.user_enabled_default, user_ref['enabled'])
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_user_enabled_invert_default_str_value(self, mock_ldap_get):
+        self.config_fixture.config(group='ldap', user_enabled_invert=True,
+                                   user_enabled_default='False')
+        # Mock the search results to return an entry with
+        # no enabled value.
+        mock_ldap_get.return_value = (
+            'cn=junk,dc=example,dc=com',
+            {
+                'sn': [uuid.uuid4().hex],
+                'email': [uuid.uuid4().hex],
+                'cn': ['junk']
+            }
+        )
+
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_ref = user_api.get('junk')
+        # Ensure that the model enabled attribute is inverted
+        # from the resource default.
+        self.assertIs(True, user_ref['enabled'])
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_user_enabled_attribute_handles_expired(self, mock_ldap_get):
+        # If using 'passwordisexpired' as the enabled attribute, and
+        # inverting it, then an unauthorized user (expired password) should
+        # not be enabled.
+        self.config_fixture.config(group='ldap', user_enabled_invert=True,
+                                   user_enabled_attribute='passwordisexpired')
+        mock_ldap_get.return_value = (
+            u'uid=123456789,c=us,ou=our_ldap,o=acme.com',
+            {
+                'uid': [123456789],
+                'mail': ['shaun@acme.com'],
+                'passwordisexpired': ['TRUE'],
+                'cn': ['uid=123456789,c=us,ou=our_ldap,o=acme.com']
+            }
+        )
+
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_ref = user_api.get('123456789')
+        self.assertIs(False, user_ref['enabled'])
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_user_enabled_attribute_handles_utf8(self, mock_ldap_get):
+        # If using 'passwordisexpired' as the enabled attribute, inverting
+        # it, and the result is utf8 encoded, then an authorized user should
+        # be enabled.
+        self.config_fixture.config(group='ldap', user_enabled_invert=True,
+                                   user_enabled_attribute='passwordisexpired')
+        mock_ldap_get.return_value = (
+            u'uid=123456789,c=us,ou=our_ldap,o=acme.com',
+            {
+                'uid': [123456789],
+                'mail': [u'shaun@acme.com'],
+                'passwordisexpired': [u'false'],
+                'cn': [u'uid=123456789,c=us,ou=our_ldap,o=acme.com']
+            }
+        )
+
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_ref = user_api.get('123456789')
+        self.assertIs(True, user_ref['enabled'])
+
+    @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'simple_bind_s')
+    def test_user_api_get_connection_no_user_password(self, mocked_method):
+        """Don't bind in case the user and password are blank."""
+        # Ensure the username/password are in fact blank.
+        self.config_fixture.config(group='ldap', user=None, password=None)
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_api.get_connection(user=None, password=None)
+        self.assertFalse(mocked_method.called,
+                         msg='`simple_bind_s` method was unexpectedly called')
+
+    @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
+    def test_chase_referrals_off(self, mocked_fakeldap):
+        self.config_fixture.config(
+            group='ldap',
+            url='fake://memory',
+            chase_referrals=False)
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_api.get_connection(user=None, password=None)
+
+        # The last call_arg should be a dictionary and should contain
+        # chase_referrals. Check to make sure the value of chase_referrals
+        # is as expected.
+        self.assertFalse(mocked_fakeldap.call_args[-1]['chase_referrals'])
+
+    @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
+    def test_chase_referrals_on(self, mocked_fakeldap):
+        self.config_fixture.config(
+            group='ldap',
+            url='fake://memory',
+            chase_referrals=True)
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_api.get_connection(user=None, password=None)
+
+        # The last call_arg should be a dictionary and should contain
+        # chase_referrals. Check to make sure the value of chase_referrals
+        # is as expected.
+        self.assertTrue(mocked_fakeldap.call_args[-1]['chase_referrals'])
+
+    @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
+    def test_debug_level_set(self, mocked_fakeldap):
+        level = 12345
+        self.config_fixture.config(
+            group='ldap',
+            url='fake://memory',
+            debug_level=level)
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_api.get_connection(user=None, password=None)
+
+        # The last call_arg should be a dictionary and should contain
+        # debug_level. Check to make sure the value of debug_level
+        # is as expected.
+        self.assertEqual(level, mocked_fakeldap.call_args[-1]['debug_level'])
+
+    def test_wrong_ldap_scope(self):
+        self.config_fixture.config(group='ldap', query_scope=uuid.uuid4().hex)
+        self.assertRaisesRegexp(
+            ValueError,
+            'Invalid LDAP scope: %s. *' % CONF.ldap.query_scope,
+            identity.backends.ldap.Identity)
+
+    def test_wrong_alias_dereferencing(self):
+        self.config_fixture.config(group='ldap',
+                                   alias_dereferencing=uuid.uuid4().hex)
+        self.assertRaisesRegexp(
+            ValueError,
+            'Invalid LDAP deref option: %s\.' % CONF.ldap.alias_dereferencing,
+            identity.backends.ldap.Identity)
+
+    def test_is_dumb_member(self):
+        self.config_fixture.config(group='ldap',
+                                   use_dumb_member=True)
+        self.load_backends()
+
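+        # The "dumb member" is a placeholder DN added to groups so that they
+        # are never empty; _is_dumb_member should recognize it.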
+        dn = 'cn=dumb,dc=nonexistent'
+        self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn))
+
+    def test_is_dumb_member_upper_case_keys(self):
+        self.config_fixture.config(group='ldap',
+                                   use_dumb_member=True)
+        self.load_backends()
+
+        dn = 'CN=dumb,DC=nonexistent'
+        self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn))
+
+    def test_is_dumb_member_with_false_use_dumb_member(self):
+        self.config_fixture.config(group='ldap',
+                                   use_dumb_member=False)
+        self.load_backends()
+        dn = 'cn=dumb,dc=nonexistent'
+        self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn))
+
+    def test_is_dumb_member_not_dumb(self):
+        self.config_fixture.config(group='ldap',
+                                   use_dumb_member=True)
+        self.load_backends()
+        dn = 'ou=some,dc=example.com'
+        self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn))
+
+    def test_user_extra_attribute_mapping(self):
+        self.config_fixture.config(
+            group='ldap',
+            user_additional_attribute_mapping=['description:name'])
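+        # 'description:name' maps the LDAP 'description' attribute onto the
+        # keystone 'name', so the stored entry's description should equal
+        # the user's name.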
+        self.load_backends()
+        user = {
+            'name': 'EXTRA_ATTRIBUTES',
+            'password': 'extra',
+            'domain_id': CONF.identity.default_domain_id
+        }
+        user = self.identity_api.create_user(user)
+        dn, attrs = self.identity_api.driver.user._ldap_get(user['id'])
+        self.assertThat([user['name']], matchers.Equals(attrs['description']))
+
+    def test_user_extra_attribute_mapping_description_is_returned(self):
+        # Given a mapping like description:description, the description is
+        # returned.
+
+        self.config_fixture.config(
+            group='ldap',
+            user_additional_attribute_mapping=['description:description'])
+        self.load_backends()
+
+        description = uuid.uuid4().hex
+        user = {
+            'name': uuid.uuid4().hex,
+            'description': description,
+            'password': uuid.uuid4().hex,
+            'domain_id': CONF.identity.default_domain_id
+        }
+        user = self.identity_api.create_user(user)
+        res = self.identity_api.driver.user.get_all()
+
+        new_user = [u for u in res if u['id'] == user['id']][0]
+        self.assertThat(new_user['description'], matchers.Equals(description))
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_user_mixed_case_attribute(self, mock_ldap_get):
+        # Mock the search results to return attribute names
+        # with unexpected case.
+        mock_ldap_get.return_value = (
+            'cn=junk,dc=example,dc=com',
+            {
+                'sN': [uuid.uuid4().hex],
+                'MaIl': [uuid.uuid4().hex],
+                'cn': ['junk']
+            }
+        )
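+        # LDAP attribute names are case-insensitive, so the driver should
+        # still map 'sN' and 'MaIl' onto the user's name and email.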
+        user = self.identity_api.get_user('junk')
+        self.assertEqual(mock_ldap_get.return_value[1]['sN'][0],
+                         user['name'])
+        self.assertEqual(mock_ldap_get.return_value[1]['MaIl'][0],
+                         user['email'])
+
+    def test_parse_extra_attribute_mapping(self):
+        option_list = ['description:name', 'gecos:password',
+                       'fake:invalid', 'invalid1', 'invalid2:',
+                       'description:name:something']
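+        # Entries that do not split into exactly 'ldap_attr:keystone_attr'
+        # ('invalid1' and 'description:name:something') should be dropped,
+        # while a trailing colon yields an empty mapped name.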
+        mapping = self.identity_api.driver.user._parse_extra_attrs(option_list)
+        expected_dict = {'description': 'name', 'gecos': 'password',
+                         'fake': 'invalid', 'invalid2': ''}
+        self.assertDictEqual(expected_dict, mapping)
+
+# TODO(henry-nash): These need to be removed when the full LDAP implementation
+# is submitted - see Bugs 1092187, 1101287, 1101276, 1101289
+
+    def test_domain_crud(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True, 'description': uuid.uuid4().hex}
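+        # Domains are read-only in the LDAP backend: creating any domain is
+        # forbidden (re-creating the default conflicts), updating or deleting
+        # the default domain is forbidden, and any other domain id is simply
+        # not found.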
+        self.assertRaises(exception.Forbidden,
+                          self.resource_api.create_domain,
+                          domain['id'],
+                          domain)
+        self.assertRaises(exception.Conflict,
+                          self.resource_api.create_domain,
+                          CONF.identity.default_domain_id,
+                          domain)
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          domain['id'])
+
+        domain['description'] = uuid.uuid4().hex
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.update_domain,
+                          domain['id'],
+                          domain)
+        self.assertRaises(exception.Forbidden,
+                          self.resource_api.update_domain,
+                          CONF.identity.default_domain_id,
+                          domain)
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          domain['id'])
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.delete_domain,
+                          domain['id'])
+        self.assertRaises(exception.Forbidden,
+                          self.resource_api.delete_domain,
+                          CONF.identity.default_domain_id)
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          domain['id'])
+
+    @tests.skip_if_no_multiple_domains_support
+    def test_create_domain_case_sensitivity(self):
+        # domains are read-only, so case sensitivity isn't an issue
+        ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex}
+        self.assertRaises(exception.Forbidden,
+                          self.resource_api.create_domain,
+                          ref['id'],
+                          ref)
+
+    def test_cache_layer_domain_crud(self):
+        # TODO(morganfainberg): This also needs to be removed when full LDAP
+        # implementation is submitted.  No need to duplicate the above test,
+        # just skip this time.
+        self.skipTest('Domains are read-only against LDAP')
+
+    def test_domain_rename_invalidates_get_domain_by_name_cache(self):
+        parent = super(LDAPIdentity, self)
+        self.assertRaises(
+            exception.Forbidden,
+            parent.test_domain_rename_invalidates_get_domain_by_name_cache)
+
+    def test_project_rename_invalidates_get_project_by_name_cache(self):
+        parent = super(LDAPIdentity, self)
+        self.assertRaises(
+            exception.Forbidden,
+            parent.test_project_rename_invalidates_get_project_by_name_cache)
+
+    def test_project_crud(self):
+        # NOTE(topol): The LDAP implementation does not currently support
+        #              updating a project name, so this method override
+        #              provides a different update test.
+        project = {'id': uuid.uuid4().hex,
+                   'name': uuid.uuid4().hex,
+                   'domain_id': CONF.identity.default_domain_id,
+                   'description': uuid.uuid4().hex,
+                   'enabled': True,
+                   'parent_id': None}
+        self.resource_api.create_project(project['id'], project)
+        project_ref = self.resource_api.get_project(project['id'])
+
+        self.assertDictEqual(project_ref, project)
+
+        project['description'] = uuid.uuid4().hex
+        self.resource_api.update_project(project['id'], project)
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertDictEqual(project_ref, project)
+
+        self.resource_api.delete_project(project['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          project['id'])
+
+    @tests.skip_if_cache_disabled('assignment')
+    def test_cache_layer_project_crud(self):
+        # NOTE(morganfainberg): LDAP implementation does not currently support
+        # updating project names.  This method override provides a different
+        # update test.
+        project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                   'domain_id': CONF.identity.default_domain_id,
+                   'description': uuid.uuid4().hex}
+        project_id = project['id']
+        # Create a project
+        self.resource_api.create_project(project_id, project)
+        self.resource_api.get_project(project_id)
+        updated_project = copy.deepcopy(project)
+        updated_project['description'] = uuid.uuid4().hex
+        # Update project, bypassing resource manager
+        self.resource_api.driver.update_project(project_id,
+                                                updated_project)
+        # Verify get_project still returns the original project_ref
+        self.assertDictContainsSubset(
+            project, self.resource_api.get_project(project_id))
+        # Invalidate cache
+        self.resource_api.get_project.invalidate(self.resource_api,
+                                                 project_id)
+        # Verify get_project now returns the new project
+        self.assertDictContainsSubset(
+            updated_project,
+            self.resource_api.get_project(project_id))
+        # Update project using the resource_api manager back to original
+        self.resource_api.update_project(project['id'], project)
+        # Verify get_project returns the original project_ref
+        self.assertDictContainsSubset(
+            project, self.resource_api.get_project(project_id))
+        # Delete project bypassing resource_api
+        self.resource_api.driver.delete_project(project_id)
+        # Verify get_project still returns the project_ref
+        self.assertDictContainsSubset(
+            project, self.resource_api.get_project(project_id))
+        # Invalidate cache
+        self.resource_api.get_project.invalidate(self.resource_api,
+                                                 project_id)
+        # Verify ProjectNotFound now raised
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          project_id)
+        # recreate project
+        self.resource_api.create_project(project_id, project)
+        self.resource_api.get_project(project_id)
+        # delete project
+        self.resource_api.delete_project(project_id)
+        # Verify ProjectNotFound is raised
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          project_id)
+
+    def _assert_create_hierarchy_not_allowed(self):
+        domain = self._get_domain_fixture()
+
+        project1 = {'id': uuid.uuid4().hex,
+                    'name': uuid.uuid4().hex,
+                    'description': '',
+                    'domain_id': domain['id'],
+                    'enabled': True,
+                    'parent_id': None}
+        self.resource_api.create_project(project1['id'], project1)
+
+        # Create project2 under project1. LDAP will not allow
+        # the creation of a project with parent_id set.
+        project2 = {'id': uuid.uuid4().hex,
+                    'name': uuid.uuid4().hex,
+                    'description': '',
+                    'domain_id': domain['id'],
+                    'enabled': True,
+                    'parent_id': project1['id']}
+
+        self.assertRaises(exception.InvalidParentProject,
+                          self.resource_api.create_project,
+                          project2['id'],
+                          project2)
+
+        # Now, we'll create project 2 with no parent
+        project2['parent_id'] = None
+        self.resource_api.create_project(project2['id'], project2)
+
+        # Return the projects to be used across the tests.
+        return [project1, project2]
+
+    def test_check_leaf_projects(self):
+        projects = self._assert_create_hierarchy_not_allowed()
+        for project in projects:
+            self.assertTrue(self.resource_api.is_leaf_project(project))
+
+    def test_list_projects_in_subtree(self):
+        projects = self._assert_create_hierarchy_not_allowed()
+        for project in projects:
+            subtree_list = self.resource_api.list_projects_in_subtree(
+                project)
+            self.assertEqual(0, len(subtree_list))
+
+    def test_list_project_parents(self):
+        projects = self._assert_create_hierarchy_not_allowed()
+        for project in projects:
+            parents_list = self.resource_api.list_project_parents(project)
+            self.assertEqual(0, len(parents_list))
+
+    def test_hierarchical_projects_crud(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_create_project_under_disabled_one(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_create_project_with_invalid_parent(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_create_leaf_project_with_invalid_domain(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_update_project_parent(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_enable_project_with_disabled_parent(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_disable_hierarchical_leaf_project(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_disable_hierarchical_not_leaf_project(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_delete_hierarchical_leaf_project(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_delete_hierarchical_not_leaf_project(self):
+        self._assert_create_hierarchy_not_allowed()
+
+    def test_check_hierarchy_depth(self):
+        projects = self._assert_create_hierarchy_not_allowed()
+        for project in projects:
+            depth = self._get_hierarchy_depth(project['id'])
+            self.assertEqual(1, depth)
+
+    def test_multi_role_grant_by_user_group_on_project_domain(self):
+        # This is a partial implementation of the standard test that
+        # is defined in test_backend.py. It omits both domain and
+        # group grants, since neither of these is yet supported by
+        # the LDAP backend.
+
+        role_list = []
+        for _ in range(2):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        user1 = {'name': uuid.uuid4().hex,
+                 'domain_id': CONF.identity.default_domain_id,
+                 'password': uuid.uuid4().hex,
+                 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': CONF.identity.default_domain_id}
+        self.resource_api.create_project(project1['id'], project1)
+
+        self.assignment_api.add_role_to_user_and_project(
+            user_id=user1['id'],
+            tenant_id=project1['id'],
+            role_id=role_list[0]['id'])
+        self.assignment_api.add_role_to_user_and_project(
+            user_id=user1['id'],
+            tenant_id=project1['id'],
+            role_id=role_list[1]['id'])
+
+        # Although list_grants is not yet supported, we can test the
+        # alternate way of getting back lists of grants, where user
+        # and group roles are combined. Only directly assigned user
+        # roles are available, since group grants are not yet supported.
+
+        combined_list = self.assignment_api.get_roles_for_user_and_project(
+            user1['id'],
+            project1['id'])
+        self.assertEqual(2, len(combined_list))
+        self.assertIn(role_list[0]['id'], combined_list)
+        self.assertIn(role_list[1]['id'], combined_list)
+
+        # Finally, although domain roles are not implemented, check we can
+        # issue the combined get-roles call with benign results, since this
+        # is used in token generation.
+
+        combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
+            user1['id'], CONF.identity.default_domain_id)
+        self.assertEqual(0, len(combined_role_list))
+
+    def test_list_projects_for_alternate_domain(self):
+        self.skipTest(
+            'N/A: LDAP does not support multiple domains')
+
+    def test_get_default_domain_by_name(self):
+        domain = self._get_domain_fixture()
+
+        domain_ref = self.resource_api.get_domain_by_name(domain['name'])
+        self.assertEqual(domain_ref, domain)
+
+    def test_base_ldap_connection_deref_option(self):
+        def get_conn(deref_name):
+            self.config_fixture.config(group='ldap',
+                                       alias_dereferencing=deref_name)
+            base_ldap = common_ldap.BaseLdap(CONF)
+            return base_ldap.get_connection()
+
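+        # Each alias_dereferencing name should map to the corresponding
+        # python-ldap DEREF_* constant; 'default' inherits whatever the
+        # process-level ldap.OPT_DEREF option currently is.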
+        conn = get_conn('default')
+        self.assertEqual(ldap.get_option(ldap.OPT_DEREF),
+                         conn.get_option(ldap.OPT_DEREF))
+
+        conn = get_conn('always')
+        self.assertEqual(ldap.DEREF_ALWAYS,
+                         conn.get_option(ldap.OPT_DEREF))
+
+        conn = get_conn('finding')
+        self.assertEqual(ldap.DEREF_FINDING,
+                         conn.get_option(ldap.OPT_DEREF))
+
+        conn = get_conn('never')
+        self.assertEqual(ldap.DEREF_NEVER,
+                         conn.get_option(ldap.OPT_DEREF))
+
+        conn = get_conn('searching')
+        self.assertEqual(ldap.DEREF_SEARCHING,
+                         conn.get_option(ldap.OPT_DEREF))
+
+    def test_list_users_no_dn(self):
+        users = self.identity_api.list_users()
+        self.assertEqual(len(default_fixtures.USERS), len(users))
+        user_ids = set(user['id'] for user in users)
+        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
+                                for user in default_fixtures.USERS)
+        for user_ref in users:
+            self.assertNotIn('dn', user_ref)
+        self.assertEqual(expected_user_ids, user_ids)
+
+    def test_list_groups_no_dn(self):
+        # Create some test groups.
+        domain = self._get_domain_fixture()
+        expected_group_ids = []
+        numgroups = 3
+        for _ in range(numgroups):
+            group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+            group = self.identity_api.create_group(group)
+            expected_group_ids.append(group['id'])
+        # Fetch the test groups and ensure that they don't contain a dn.
+        groups = self.identity_api.list_groups()
+        self.assertEqual(numgroups, len(groups))
+        group_ids = set(group['id'] for group in groups)
+        for group_ref in groups:
+            self.assertNotIn('dn', group_ref)
+        self.assertEqual(set(expected_group_ids), group_ids)
+
+    def test_list_groups_for_user_no_dn(self):
+        # Create a test user.
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': CONF.identity.default_domain_id,
+                'password': uuid.uuid4().hex, 'enabled': True}
+        user = self.identity_api.create_user(user)
+        # Create some test groups and add the test user as a member.
+        domain = self._get_domain_fixture()
+        expected_group_ids = []
+        numgroups = 3
+        for _ in range(numgroups):
+            group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+            group = self.identity_api.create_group(group)
+            expected_group_ids.append(group['id'])
+            self.identity_api.add_user_to_group(user['id'], group['id'])
+        # Fetch the groups for the test user
+        # and ensure they don't contain a dn.
+        groups = self.identity_api.list_groups_for_user(user['id'])
+        self.assertEqual(numgroups, len(groups))
+        group_ids = set(group['id'] for group in groups)
+        for group_ref in groups:
+            self.assertNotIn('dn', group_ref)
+        self.assertEqual(set(expected_group_ids), group_ids)
+
+    def test_user_id_attribute_in_create(self):
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_id_attribute = 'mail'
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        user = {'name': u'fäké1',
+                'password': u'fäképass1',
+                'domain_id': CONF.identity.default_domain_id}
+        user = self.identity_api.create_user(user)
+        user_ref = self.identity_api.get_user(user['id'])
+        # The 'email' attribute should have been created because it is also
+        # being used as the user_id.
+        self.assertEqual(user_ref['id'], user_ref['email'])
+
+    def test_user_id_attribute_map(self):
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_id_attribute = 'mail'
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        user_ref = self.identity_api.get_user(self.user_foo['email'])
+        # The user_id_attribute mapping should be honored, which means
+        # user_ref['id'] should contain the email attribute.
+        self.assertEqual(self.user_foo['email'], user_ref['id'])
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_get_id_from_dn_for_multivalued_attribute_id(self, mock_ldap_get):
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_id_attribute = 'mail'
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        # make 'email' multivalued so we can test the error condition
+        email1 = uuid.uuid4().hex
+        email2 = uuid.uuid4().hex
+        mock_ldap_get.return_value = (
+            'cn=nobodycares,dc=example,dc=com',
+            {
+                'sn': [uuid.uuid4().hex],
+                'mail': [email1, email2],
+                'cn': 'nobodycares'
+            }
+        )
+
+        user_ref = self.identity_api.get_user(email1)
+        # make sure we get the ID from DN (old behavior) if the ID attribute
+        # has multiple values
+        self.assertEqual('nobodycares', user_ref['id'])
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_id_attribute_not_found(self, mock_ldap_get):
+        mock_ldap_get.return_value = (
+            'cn=nobodycares,dc=example,dc=com',
+            {
+                'sn': [uuid.uuid4().hex],
+            }
+        )
+
+        user_api = identity.backends.ldap.UserApi(CONF)
+        self.assertRaises(exception.NotFound,
+                          user_api.get,
+                          'nobodycares')
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_user_id_not_in_dn(self, mock_ldap_get):
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_id_attribute = 'uid'
+        conf.ldap.user_name_attribute = 'cn'
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        mock_ldap_get.return_value = (
+            'foo=bar,dc=example,dc=com',
+            {
+                'sn': [uuid.uuid4().hex],
+                'foo': ['bar'],
+                'cn': ['junk'],
+                'uid': ['crap']
+            }
+        )
+        user_ref = self.identity_api.get_user('crap')
+        self.assertEqual('crap', user_ref['id'])
+        self.assertEqual('junk', user_ref['name'])
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_user_name_in_dn(self, mock_ldap_get):
+        conf = self.get_config(CONF.identity.default_domain_id)
+        conf.ldap.user_id_attribute = 'sAMAccountName'
+        conf.ldap.user_name_attribute = 'cn'
+        self.reload_backends(CONF.identity.default_domain_id)
+
+        mock_ldap_get.return_value = (
+            'cn=Foo Bar,dc=example,dc=com',
+            {
+                'sn': [uuid.uuid4().hex],
+                'cn': ['Foo Bar'],
+                'SAMAccountName': ['crap']
+            }
+        )
+        user_ref = self.identity_api.get_user('crap')
+        self.assertEqual('crap', user_ref['id'])
+        self.assertEqual('Foo Bar', user_ref['name'])
+
+
+class LDAPIdentityEnabledEmulation(LDAPIdentity):
+    def setUp(self):
+        super(LDAPIdentityEnabledEmulation, self).setUp()
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        for obj in [self.tenant_bar, self.tenant_baz, self.user_foo,
+                    self.user_two, self.user_badguy]:
+            obj.setdefault('enabled', True)
+
+    def load_fixtures(self, fixtures):
+        # Override the super implementation since we need to create the
+        # group container.
+        create_group_container(self.identity_api)
+        super(LDAPIdentity, self).load_fixtures(fixtures)
+
+    def config_files(self):
+        config_files = super(LDAPIdentityEnabledEmulation, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
+        return config_files
+
+    def config_overrides(self):
+        super(LDAPIdentityEnabledEmulation, self).config_overrides()
+        self.config_fixture.config(group='ldap',
+                                   user_enabled_emulation=True,
+                                   project_enabled_emulation=True)
+
+    def test_project_crud(self):
+        # NOTE(topol): LDAPIdentityEnabledEmulation will create an
+        #              enabled key in the project dictionary, so this
+        #              method override handles this side effect.
+        project = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'domain_id': CONF.identity.default_domain_id,
+            'description': uuid.uuid4().hex,
+            'parent_id': None}
+
+        self.resource_api.create_project(project['id'], project)
+        project_ref = self.resource_api.get_project(project['id'])
+
+        # self.resource_api.create_project adds an enabled key with a value
+        # of True when LDAPIdentityEnabledEmulation is used, so we now add
+        # this expected key to the project dictionary.
+        project['enabled'] = True
+        self.assertDictEqual(project_ref, project)
+
+        project['description'] = uuid.uuid4().hex
+        self.resource_api.update_project(project['id'], project)
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertDictEqual(project_ref, project)
+
+        self.resource_api.delete_project(project['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          project['id'])
+
+    def test_user_crud(self):
+        user_dict = {
+            'domain_id': CONF.identity.default_domain_id,
+            'name': uuid.uuid4().hex,
+            'password': uuid.uuid4().hex}
+        user = self.identity_api.create_user(user_dict)
+        user_dict['enabled'] = True
+        user_ref = self.identity_api.get_user(user['id'])
+        del user_dict['password']
+        user_ref_dict = {x: user_ref[x] for x in user_ref}
+        self.assertDictContainsSubset(user_dict, user_ref_dict)
+
+        user_dict['password'] = uuid.uuid4().hex
+        self.identity_api.update_user(user['id'], user)
+        user_ref = self.identity_api.get_user(user['id'])
+        del user_dict['password']
+        user_ref_dict = {x: user_ref[x] for x in user_ref}
+        self.assertDictContainsSubset(user_dict, user_ref_dict)
+
+        self.identity_api.delete_user(user['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          user['id'])
+
+    def test_user_auth_emulated(self):
+        self.config_fixture.config(group='ldap',
+                                   user_enabled_emulation_dn='cn=test,dc=test')
+        self.reload_backends(CONF.identity.default_domain_id)
+        self.identity_api.authenticate(
+            context={},
+            user_id=self.user_foo['id'],
+            password=self.user_foo['password'])
+
+    def test_user_enable_attribute_mask(self):
+        self.skipTest(
+            "Enabled emulation conflicts with enabled mask")
+
+    def test_user_enabled_invert(self):
+        self.config_fixture.config(group='ldap', user_enabled_invert=True,
+                                   user_enabled_default=False)
+        self.clear_database()
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        user1 = {'name': u'fäké1', 'enabled': True,
+                 'domain_id': CONF.identity.default_domain_id}
+
+        user2 = {'name': u'fäké2', 'enabled': False,
+                 'domain_id': CONF.identity.default_domain_id}
+
+        user3 = {'name': u'fäké3',
+                 'domain_id': CONF.identity.default_domain_id}
+
+        # Ensure that the enabled LDAP attribute is not set for a
+        # newly created enabled user.
+        user_ref = self.identity_api.create_user(user1)
+        self.assertIs(True, user_ref['enabled'])
+        self.assertIsNone(self.get_user_enabled_vals(user_ref))
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(True, user_ref['enabled'])
+
+        # Ensure that an enabled LDAP attribute is not set for a disabled user.
+        user1['enabled'] = False
+        user_ref = self.identity_api.update_user(user_ref['id'], user1)
+        self.assertIs(False, user_ref['enabled'])
+        self.assertIsNone(self.get_user_enabled_vals(user_ref))
+
+        # Enable the user and ensure that the LDAP enabled
+        # attribute is not set.
+        user1['enabled'] = True
+        user_ref = self.identity_api.update_user(user_ref['id'], user1)
+        self.assertIs(True, user_ref['enabled'])
+        self.assertIsNone(self.get_user_enabled_vals(user_ref))
+
+        # Ensure that the LDAP enabled attribute is not set for a
+        # newly created disabled user.
+        user_ref = self.identity_api.create_user(user2)
+        self.assertIs(False, user_ref['enabled'])
+        self.assertIsNone(self.get_user_enabled_vals(user_ref))
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(False, user_ref['enabled'])
+
+        # Ensure that the LDAP enabled attribute is not set for a newly created
+        # user when the user_enabled_default setting is used.
+        user_ref = self.identity_api.create_user(user3)
+        self.assertIs(True, user_ref['enabled'])
+        self.assertIsNone(self.get_user_enabled_vals(user_ref))
+        user_ref = self.identity_api.get_user(user_ref['id'])
+        self.assertIs(True, user_ref['enabled'])
+
+    def test_user_enabled_invert_no_enabled_value(self):
+        self.skipTest(
+            "N/A: Covered by test_user_enabled_invert")
+
+    def test_user_enabled_invert_default_str_value(self):
+        self.skipTest(
+            "N/A: Covered by test_user_enabled_invert")
+
+    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
+    def test_user_enabled_attribute_handles_utf8(self, mock_ldap_get):
+        # Since user_enabled_emulation is enabled in this test, the inherited
+        # version of this test would fail because it relies on
+        # user_enabled_invert; the user is reported as disabled instead.
+        self.config_fixture.config(group='ldap', user_enabled_invert=True,
+                                   user_enabled_attribute='passwordisexpired')
+        mock_ldap_get.return_value = (
+            u'uid=123456789,c=us,ou=our_ldap,o=acme.com',
+            {
+                'uid': [123456789],
+                'mail': [u'shaun@acme.com'],
+                'passwordisexpired': [u'false'],
+                'cn': [u'uid=123456789,c=us,ou=our_ldap,o=acme.com']
+            }
+        )
+
+        user_api = identity.backends.ldap.UserApi(CONF)
+        user_ref = user_api.get('123456789')
+        self.assertIs(False, user_ref['enabled'])
+
+
+class LdapIdentitySqlAssignment(BaseLDAPIdentity, tests.SQLDriverOverrides,
+                                tests.TestCase):
+
+    def config_files(self):
+        config_files = super(LdapIdentitySqlAssignment, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap_sql.conf'))
+        return config_files
+
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(LdapIdentitySqlAssignment, self).setUp()
+        self.clear_database()
+        self.load_backends()
+        cache.configure_cache_region(cache.REGION)
+        self.engine = sql.get_engine()
+        self.addCleanup(sql.cleanup)
+
+        sql.ModelBase.metadata.create_all(bind=self.engine)
+        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+
+        self.load_fixtures(default_fixtures)
+        # 'enabled' is defaulted to True by the data load.
+        self.user_foo['enabled'] = True
+
+    def config_overrides(self):
+        super(LdapIdentitySqlAssignment, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+        self.config_fixture.config(
+            group='resource',
+            driver='keystone.resource.backends.sql.Resource')
+        self.config_fixture.config(
+            group='assignment',
+            driver='keystone.assignment.backends.sql.Assignment')
+
+    def test_domain_crud(self):
+        pass
+
+    def test_list_domains(self):
+        domains = self.resource_api.list_domains()
+        self.assertEqual([resource.calc_default_domain()], domains)
+
+    def test_list_domains_non_default_domain_id(self):
+        # If we change the default_domain_id, the ID of the default domain
+        # returned by list_domains doesn't change, because the SQL identity
+        # backend reads it from the database, which doesn't get updated by a
+        # config change.
+
+        orig_default_domain_id = CONF.identity.default_domain_id
+
+        new_domain_id = uuid.uuid4().hex
+        self.config_fixture.config(group='identity',
+                                   default_domain_id=new_domain_id)
+
+        domains = self.resource_api.list_domains()
+
+        self.assertEqual(orig_default_domain_id, domains[0]['id'])
+
+    def test_create_domain(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        self.assertRaises(exception.Forbidden,
+                          self.resource_api.create_domain,
+                          domain['id'],
+                          domain)
+
+    def test_get_and_remove_role_grant_by_group_and_domain(self):
+        # TODO(henry-nash): We should really rewrite the tests in test_backend
+        # to be more flexible as to where the domains are sourced from, so
+        # that we would not need to override such tests here. This is raised
+        # as bug 1373865.
+        new_domain = self._get_domain_fixture()
+        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
+                    'enabled': True, 'domain_id': new_domain['id']}
+        new_user = self.identity_api.create_user(new_user)
+        self.identity_api.add_user_to_group(new_user['id'],
+                                            new_group['id'])
+
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+
+        self.assignment_api.create_grant(group_id=new_group['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertDictEqual(roles_ref[0], self.role_member)
+
+        self.assignment_api.delete_grant(group_id=new_group['id'],
+                                         domain_id=new_domain['id'],
+                                         role_id='member')
+        roles_ref = self.assignment_api.list_grants(
+            group_id=new_group['id'],
+            domain_id=new_domain['id'])
+        self.assertEqual(0, len(roles_ref))
+        self.assertRaises(exception.NotFound,
+                          self.assignment_api.delete_grant,
+                          group_id=new_group['id'],
+                          domain_id=new_domain['id'],
+                          role_id='member')
+
+    def test_project_enabled_ignored_disable_error(self):
+        # Override
+        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
+                      "SQL assignment backend.")
+
+
+class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
+    """Class to test mapping of default LDAP backend.
+
+    The default configuration is not to enable mapping when using a single
+    backend LDAP driver.  However, a cloud provider might want to enable
+    the mapping, hence hiding the LDAP IDs from any clients of keystone.
+    Setting backward_compatible_ids to False will enable this mapping.
+
+    """
+    def config_overrides(self):
+        super(LdapIdentitySqlAssignmentWithMapping, self).config_overrides()
+        self.config_fixture.config(group='identity_mapping',
+                                   backward_compatible_ids=False)
+
+    def test_dynamic_mapping_build(self):
+        """Test to ensure entities not create via controller are mapped.
+
+        Many LDAP backends will, essentially, by Read Only. In these cases
+        the mapping is not built by creating objects, rather from enumerating
+        the entries.  We test this here my manually deleting the mapping and
+        then trying to re-read the entries.
+
+        """
+        initial_mappings = len(mapping_sql.list_id_mappings())
+        user1 = {'name': uuid.uuid4().hex,
+                 'domain_id': CONF.identity.default_domain_id,
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user1 = self.identity_api.create_user(user1)
+        user2 = {'name': uuid.uuid4().hex,
+                 'domain_id': CONF.identity.default_domain_id,
+                 'password': uuid.uuid4().hex, 'enabled': True}
+        user2 = self.identity_api.create_user(user2)
+        mappings = mapping_sql.list_id_mappings()
+        self.assertEqual(initial_mappings + 2, len(mappings))
+
+        # Now delete the mappings for the two users above
+        self.id_mapping_api.purge_mappings({'public_id': user1['id']})
+        self.id_mapping_api.purge_mappings({'public_id': user2['id']})
+
+        # We should no longer be able to get these users via their old IDs
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          user1['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          user2['id'])
+
+        # Now enumerate all users...this should re-build the mapping, and
+        # we should be able to find the users via their original public IDs.
+        self.identity_api.list_users()
+        self.identity_api.get_user(user1['id'])
+        self.identity_api.get_user(user2['id'])
+
+    def test_get_roles_for_user_and_project_user_group_same_id(self):
+        self.skipTest('N/A: We never generate the same ID for a user and '
+                      'group in our mapping table')
+
+
+class BaseMultiLDAPandSQLIdentity(object):
+    """Mixin class with support methods for domain-specific config testing."""
+
+    def create_user(self, domain_id):
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': domain_id,
+                'password': uuid.uuid4().hex,
+                'enabled': True}
+        user_ref = self.identity_api.create_user(user)
+        # Put the password back in, since this is used later by tests to
+        # authenticate.
+        user_ref['password'] = user['password']
+        return user_ref
+
+    def create_users_across_domains(self):
+        """Create a set of users, each with a role on their own domain."""
+
+        # We will also check that the right number of id mappings get
+        # created.
+        initial_mappings = len(mapping_sql.list_id_mappings())
+
+        self.users['user0'] = self.create_user(
+            self.domains['domain_default']['id'])
+        self.assignment_api.create_grant(
+            user_id=self.users['user0']['id'],
+            domain_id=self.domains['domain_default']['id'],
+            role_id=self.role_member['id'])
+        for x in range(1, self.domain_count):
+            self.users['user%s' % x] = self.create_user(
+                self.domains['domain%s' % x]['id'])
+            self.assignment_api.create_grant(
+                user_id=self.users['user%s' % x]['id'],
+                domain_id=self.domains['domain%s' % x]['id'],
+                role_id=self.role_member['id'])
+
+        # So how many new id mappings should have been created? One for each
+        # user created in a domain that is using the non-default driver.
+        self.assertEqual(initial_mappings + self.domain_specific_count,
+                         len(mapping_sql.list_id_mappings()))
+
+    def check_user(self, user, domain_id, expected_status):
+        """Check user is in correct backend.
+
+        As part of the tests, we want to force ourselves to manually
+        select the driver for a given domain, to make sure the entity
+        ended up in the correct backend.
+
+        """
+        driver = self.identity_api._select_identity_driver(domain_id)
+        unused, unused, entity_id = (
+            self.identity_api._get_domain_driver_and_entity_id(
+                user['id']))
+
+        if expected_status == 200:
+            ref = driver.get_user(entity_id)
+            ref = self.identity_api._set_domain_id_and_mapping(
+                ref, domain_id, driver, map.EntityType.USER)
+            user = user.copy()
+            del user['password']
+            self.assertDictEqual(ref, user)
+        else:
+            # TODO(henry-nash): Use assertRaises here, although
+            # there appears to be an issue with using driver.get_user
+            # inside that construct.
+            try:
+                driver.get_user(entity_id)
+            except expected_status:
+                pass
+
+    def setup_initial_domains(self):
+
+        def create_domain(domain):
+            try:
+                ref = self.resource_api.create_domain(
+                    domain['id'], domain)
+            except exception.Conflict:
+                ref = (
+                    self.resource_api.get_domain_by_name(domain['name']))
+            return ref
+
+        self.domains = {}
+        for x in range(1, self.domain_count):
+            domain = 'domain%s' % x
+            self.domains[domain] = create_domain(
+                {'id': uuid.uuid4().hex, 'name': domain})
+        self.domains['domain_default'] = create_domain(
+            resource.calc_default_domain())
+
+    def test_authenticate_to_each_domain(self):
+        """Test that a user in each domain can authenticate."""
+        for user_num in range(self.domain_count):
+            user = 'user%s' % user_num
+            self.identity_api.authenticate(
+                context={},
+                user_id=self.users[user]['id'],
+                password=self.users[user]['password'])
+
+
+class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
+                              tests.TestCase, BaseMultiLDAPandSQLIdentity):
+    """Class to test common SQL plus individual LDAP backends.
+
+    We define a set of domains and domain-specific backends:
+
+    - A separate LDAP backend for the default domain
+    - A separate LDAP backend for domain1
+    - domain2 shares the same LDAP as domain1, but uses a different
+      tree attach point
+    - An SQL backend for all other domains (which will include domain3
+      and domain4)
+
+    Normally one would expect that the default domain would be handled as
+    part of the "other domains" - however the above provides better
+    test coverage since most of the existing backend tests use the default
+    domain.
+
+    """
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(MultiLDAPandSQLIdentity, self).setUp()
+
+        self.load_backends()
+
+        self.engine = sql.get_engine()
+        self.addCleanup(sql.cleanup)
+
+        sql.ModelBase.metadata.create_all(bind=self.engine)
+        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+
+        self.domain_count = 5
+        self.domain_specific_count = 3
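+        # Five domains in total: per the class docstring, the default domain,
+        # domain1 and domain2 use domain-specific backends (hence
+        # domain_specific_count = 3), while the rest fall through to SQL.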
+        self.setup_initial_domains()
+        self._setup_initial_users()
+
+        # All initial test data setup complete, time to switch on support
+        # for separate backends per domain.
+        self.enable_multi_domain()
+
+        self.clear_database()
+        self.load_fixtures(default_fixtures)
+        self.create_users_across_domains()
+
+    def config_overrides(self):
+        super(MultiLDAPandSQLIdentity, self).config_overrides()
+        # Make sure identity, resource and assignment are actually SQL
+        # drivers - BaseLDAPIdentity sets these options to use LDAP.
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.sql.Identity')
+        self.config_fixture.config(
+            group='resource',
+            driver='keystone.resource.backends.sql.Resource')
+        self.config_fixture.config(
+            group='assignment',
+            driver='keystone.assignment.backends.sql.Assignment')
+
+    def _setup_initial_users(self):
+        # Create some identity entities BEFORE we switch to multi-backend, so
+        # we can test that these are still accessible
+        self.users = {}
+        self.users['userA'] = self.create_user(
+            self.domains['domain_default']['id'])
+        self.users['userB'] = self.create_user(
+            self.domains['domain1']['id'])
+        self.users['userC'] = self.create_user(
+            self.domains['domain3']['id'])
+
+    def enable_multi_domain(self):
+        """Enable the chosen form of multi domain configuration support.
+
+        This method enables the file-based configuration support. Child classes
+        that wish to use the database domain configuration support should
+        override this method and set the appropriate config_fixture option.
+
+        """
+        self.config_fixture.config(
+            group='identity', domain_specific_drivers_enabled=True,
+            domain_config_dir=tests.TESTCONF + '/domain_configs_multi_ldap')
+        self.config_fixture.config(group='identity_mapping',
+                                   backward_compatible_ids=False)
+
+    def reload_backends(self, domain_id):
+        # Just reload the driver for this domain - this will pick up
+        # any updated config
+        self.identity_api.domain_configs.reload_domain_driver(domain_id)
+
+    def get_config(self, domain_id):
+        # Get the config for this domain; this will return CONF
+        # if no specific config is defined for this domain
+        return self.identity_api.domain_configs.get_domain_conf(domain_id)
+
+    def test_list_domains(self):
+        self.skipTest(
+            'N/A: Not relevant for multi ldap testing')
+
+    def test_list_domains_non_default_domain_id(self):
+        self.skipTest(
+            'N/A: Not relevant for multi ldap testing')
+
+    def test_list_users(self):
+        # Override the standard list_users test: since we have added an
+        # extra user to the default domain, the number of expected users is
+        # one more than in the standard test.
+        users = self.identity_api.list_users(
+            domain_scope=self._set_domain_scope(
+                CONF.identity.default_domain_id))
+        self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
+        user_ids = set(user['id'] for user in users)
+        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
+                                for user in default_fixtures.USERS)
+        expected_user_ids.add(self.users['user0']['id'])
+        for user_ref in users:
+            self.assertNotIn('password', user_ref)
+        self.assertEqual(expected_user_ids, user_ids)
+
+    def test_domain_segregation(self):
+        """Test that separate configs have segregated the domain.
+
+        Test Plan:
+
+        - Users were created in each domain as part of setup, now make sure
+          you can only find a given user in its relevant domain/backend
+        - Make sure that for a backend that supports multiple domains
+          you can get the users via any of its domains
+
+        """
+        # Check that I can read a user with the appropriate domain-selected
+        # driver, but won't find it via any other domain driver
+
+        check_user = self.check_user
+        check_user(self.users['user0'],
+                   self.domains['domain_default']['id'], 200)
+        for domain in [self.domains['domain1']['id'],
+                       self.domains['domain2']['id'],
+                       self.domains['domain3']['id'],
+                       self.domains['domain4']['id']]:
+            check_user(self.users['user0'], domain, exception.UserNotFound)
+
+        check_user(self.users['user1'], self.domains['domain1']['id'], 200)
+        for domain in [self.domains['domain_default']['id'],
+                       self.domains['domain2']['id'],
+                       self.domains['domain3']['id'],
+                       self.domains['domain4']['id']]:
+            check_user(self.users['user1'], domain, exception.UserNotFound)
+
+        check_user(self.users['user2'], self.domains['domain2']['id'], 200)
+        for domain in [self.domains['domain_default']['id'],
+                       self.domains['domain1']['id'],
+                       self.domains['domain3']['id'],
+                       self.domains['domain4']['id']]:
+            check_user(self.users['user2'], domain, exception.UserNotFound)
+
+        # domain3 and domain4 share the same backend, so you should be
+        # able to see user3 and user4 from either.
+
+        check_user(self.users['user3'], self.domains['domain3']['id'], 200)
+        check_user(self.users['user3'], self.domains['domain4']['id'], 200)
+        check_user(self.users['user4'], self.domains['domain3']['id'], 200)
+        check_user(self.users['user4'], self.domains['domain4']['id'], 200)
+
+        for domain in [self.domains['domain_default']['id'],
+                       self.domains['domain1']['id'],
+                       self.domains['domain2']['id']]:
+            check_user(self.users['user3'], domain, exception.UserNotFound)
+            check_user(self.users['user4'], domain, exception.UserNotFound)
+
+        # Finally, going through the regular manager layer, make sure we
+        # only see the right number of users in each of the non-default
+        # domains.  One might have expected two users in domain1 (since we
+        # created one before we switched to multi-backend); however, since
+        # that domain changed backends in the switch, we don't find that
+        # user any more.  This is as designed - we don't support moving
+        # domains between backends.
+        #
+        # The listing of the default domain is already handled in the
+        # test_list_users() method.
+        for domain in [self.domains['domain1']['id'],
+                       self.domains['domain2']['id'],
+                       self.domains['domain4']['id']]:
+            self.assertThat(
+                self.identity_api.list_users(domain_scope=domain),
+                matchers.HasLength(1))
+
+        # domain3 had a user created before we switched on
+        # multiple backends, plus one created afterwards - and its
+        # backend has not changed - so we should find two.
+        self.assertThat(
+            self.identity_api.list_users(
+                domain_scope=self.domains['domain3']['id']),
+            matchers.HasLength(2))
+
+    def test_existing_uuids_work(self):
+        """Test that 'uni-domain' created IDs still work.
+
+        Throwing the switch to domain-specific backends should not cause
+        existing identities to be inaccessible via ID.
+
+        """
+        self.identity_api.get_user(self.users['userA']['id'])
+        self.identity_api.get_user(self.users['userB']['id'])
+        self.identity_api.get_user(self.users['userC']['id'])
+
+    def test_scanning_of_config_dir(self):
+        """Test the Manager class scans the config directory.
+
+        The setup for the main tests above loads the domain configs directly
+        so that the test overrides can be included. This test just makes sure
+        that the standard config directory scanning does pick up the relevant
+        domain config files.
+
+        """
+        # Confirm that config has drivers_enabled as True; later in this
+        # test we will check that it comes through as False in a
+        # domain-specific config
+        self.assertTrue(CONF.identity.domain_specific_drivers_enabled)
+        self.load_backends()
+        # Execute any command to trigger the lazy loading of domain configs
+        self.identity_api.list_users(
+            domain_scope=self.domains['domain1']['id'])
+        # ...and now check the domain configs have been set up
+        self.assertIn('default', self.identity_api.domain_configs)
+        self.assertIn(self.domains['domain1']['id'],
+                      self.identity_api.domain_configs)
+        self.assertIn(self.domains['domain2']['id'],
+                      self.identity_api.domain_configs)
+        self.assertNotIn(self.domains['domain3']['id'],
+                         self.identity_api.domain_configs)
+        self.assertNotIn(self.domains['domain4']['id'],
+                         self.identity_api.domain_configs)
+
+        # Finally check that a domain specific config contains items from both
+        # the primary config and the domain specific config
+        conf = self.identity_api.domain_configs.get_domain_conf(
+            self.domains['domain1']['id'])
+        # This should now be false, as is the default, since this is not
+        # set in the standard primary config file
+        self.assertFalse(conf.identity.domain_specific_drivers_enabled)
+        # ...and make sure a domain-specific option is also set
+        self.assertEqual('fake://memory1', conf.ldap.url)
+
+    def test_delete_domain_with_user_added(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        project = {'id': uuid.uuid4().hex,
+                   'name': uuid.uuid4().hex,
+                   'domain_id': domain['id'],
+                   'description': uuid.uuid4().hex,
+                   'parent_id': None,
+                   'enabled': True}
+        self.resource_api.create_domain(domain['id'], domain)
+        self.resource_api.create_project(project['id'], project)
+        project_ref = self.resource_api.get_project(project['id'])
+        self.assertDictEqual(project_ref, project)
+
+        self.assignment_api.create_grant(user_id=self.user_foo['id'],
+                                         project_id=project['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.delete_grant(user_id=self.user_foo['id'],
+                                         project_id=project['id'],
+                                         role_id=self.role_member['id'])
+        domain['enabled'] = False
+        self.resource_api.update_domain(domain['id'], domain)
+        self.resource_api.delete_domain(domain['id'])
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          domain['id'])
+
+    def test_user_enabled_ignored_disable_error(self):
+        # Override.
+        self.skipTest("Doesn't apply since LDAP config has no effect on the "
+                      "SQL identity backend.")
+
+    def test_group_enabled_ignored_disable_error(self):
+        # Override.
+        self.skipTest("Doesn't apply since LDAP config has no effect on the "
+                      "SQL identity backend.")
+
+    def test_project_enabled_ignored_disable_error(self):
+        # Override
+        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
+                      "SQL assignment backend.")
+
+
+class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
+    """Class to test the use of domain configs stored in the database.
+
+    Repeat the same tests as MultiLDAPandSQLIdentity, but instead of using the
+    domain specific config files, store the domain specific values in the
+    database.
+
+    """
+    def enable_multi_domain(self):
+        # The values below are the same as in the domain_configs_multi_ldap
+        # directory of test config files.
+        default_config = {
+            'ldap': {'url': 'fake://memory',
+                     'user': 'cn=Admin',
+                     'password': 'password',
+                     'suffix': 'cn=example,cn=com'},
+            'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+        }
+        domain1_config = {
+            'ldap': {'url': 'fake://memory1',
+                     'user': 'cn=Admin',
+                     'password': 'password',
+                     'suffix': 'cn=example,cn=com'},
+            'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+        }
+        domain2_config = {
+            'ldap': {'url': 'fake://memory',
+                     'user': 'cn=Admin',
+                     'password': 'password',
+                     'suffix': 'cn=myroot,cn=com',
+                     'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org',
+                     'user_tree_dn': 'ou=Users,dc=myroot,dc=org'},
+            'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+        }
+
+        self.domain_config_api.create_config(CONF.identity.default_domain_id,
+                                             default_config)
+        self.domain_config_api.create_config(self.domains['domain1']['id'],
+                                             domain1_config)
+        self.domain_config_api.create_config(self.domains['domain2']['id'],
+                                             domain2_config)
+
+        self.config_fixture.config(
+            group='identity', domain_specific_drivers_enabled=True,
+            domain_configurations_from_database=True)
+        self.config_fixture.config(group='identity_mapping',
+                                   backward_compatible_ids=False)
+
+    def test_domain_config_has_no_impact_if_database_support_disabled(self):
+        """Ensure database domain configs have no effect if disabled.
+
+        Set reading from database configs to false, restart the backends,
+        and then try to set and use database configs.
+
+        """
+        self.config_fixture.config(
+            group='identity', domain_configurations_from_database=False)
+        self.load_backends()
+        new_config = {'ldap': {'url': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(
+            CONF.identity.default_domain_id, new_config)
+        # Trigger the identity backend to initialise any domain specific
+        # configurations
+        self.identity_api.list_users()
+        # Check that the new config has not been passed to the driver for
+        # the default domain.
+        default_config = (
+            self.identity_api.domain_configs.get_domain_conf(
+                CONF.identity.default_domain_id))
+        self.assertEqual(CONF.ldap.url, default_config.ldap.url)
+
+
+class DomainSpecificLDAPandSQLIdentity(
+    BaseLDAPIdentity, tests.SQLDriverOverrides, tests.TestCase,
+        BaseMultiLDAPandSQLIdentity):
+    """Class to test when all domains use specific configs, including SQL.
+
+    We define a set of domains and domain-specific backends:
+
+    - A separate LDAP backend for the default domain
+    - A separate SQL backend for domain1
+
+    Although the default driver still exists, we don't use it.
+
+    """
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(DomainSpecificLDAPandSQLIdentity, self).setUp()
+        self.initial_setup()
+
+    def initial_setup(self):
+        # We aren't setting up any initial data ahead of switching to
+        # domain-specific operation, so make the switch straight away.
+        self.config_fixture.config(
+            group='identity', domain_specific_drivers_enabled=True,
+            domain_config_dir=(
+                tests.TESTCONF + '/domain_configs_one_sql_one_ldap'))
+        self.config_fixture.config(group='identity_mapping',
+                                   backward_compatible_ids=False)
+
+        self.load_backends()
+
+        self.engine = sql.get_engine()
+        self.addCleanup(sql.cleanup)
+
+        sql.ModelBase.metadata.create_all(bind=self.engine)
+        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+
+        self.domain_count = 2
+        self.domain_specific_count = 2
+        self.setup_initial_domains()
+        self.users = {}
+
+        self.clear_database()
+        self.load_fixtures(default_fixtures)
+        self.create_users_across_domains()
+
+    def config_overrides(self):
+        super(DomainSpecificLDAPandSQLIdentity, self).config_overrides()
+        # Make sure resource & assignment are actually SQL drivers -
+        # BaseLDAPIdentity sets these options to use LDAP.
+        self.config_fixture.config(
+            group='resource',
+            driver='keystone.resource.backends.sql.Resource')
+        self.config_fixture.config(
+            group='assignment',
+            driver='keystone.assignment.backends.sql.Assignment')
+
+    def reload_backends(self, domain_id):
+        # Just reload the driver for this domain - this will pick up
+        # any updated config
+        self.identity_api.domain_configs.reload_domain_driver(domain_id)
+
+    def get_config(self, domain_id):
+        # Get the config for this domain; this will return CONF
+        # if no specific config is defined for this domain
+        return self.identity_api.domain_configs.get_domain_conf(domain_id)
+
+    def test_list_domains(self):
+        self.skipTest(
+            'N/A: Not relevant for multi ldap testing')
+
+    def test_list_domains_non_default_domain_id(self):
+        self.skipTest(
+            'N/A: Not relevant for multi ldap testing')
+
+    def test_domain_crud(self):
+        self.skipTest(
+            'N/A: Not relevant for multi ldap testing')
+
+    def test_list_users(self):
+        # Override the standard list_users test: since we have added an
+        # extra user to the default domain, the number of expected users is
+        # one more than in the standard test.
+        users = self.identity_api.list_users(
+            domain_scope=self._set_domain_scope(
+                CONF.identity.default_domain_id))
+        self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
+        user_ids = set(user['id'] for user in users)
+        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
+                                for user in default_fixtures.USERS)
+        expected_user_ids.add(self.users['user0']['id'])
+        for user_ref in users:
+            self.assertNotIn('password', user_ref)
+        self.assertEqual(expected_user_ids, user_ids)
+
+    def test_domain_segregation(self):
+        """Test that separate configs have segregated the domain.
+
+        Test Plan:
+
+        - Users were created in each domain as part of setup, now make sure
+          you can only find a given user in its relevant domain/backend
+        - Make sure that for a backend that supports multiple domains
+          you can get the users via any of its domains
+
+        """
+        # Check that I can read a user with the appropriate domain-selected
+        # driver, but won't find it via any other domain driver
+
+        self.check_user(self.users['user0'],
+                        self.domains['domain_default']['id'], 200)
+        self.check_user(self.users['user0'],
+                        self.domains['domain1']['id'], exception.UserNotFound)
+
+        self.check_user(self.users['user1'],
+                        self.domains['domain1']['id'], 200)
+        self.check_user(self.users['user1'],
+                        self.domains['domain_default']['id'],
+                        exception.UserNotFound)
+
+        # Finally, going through the regular manager layer, make sure we
+        # only see the right number of users in the non-default domain.
+
+        self.assertThat(
+            self.identity_api.list_users(
+                domain_scope=self.domains['domain1']['id']),
+            matchers.HasLength(1))
+
+    def test_add_role_grant_to_user_and_project_404(self):
+        self.skipTest('Blocked by bug 1101287')
+
+    def test_get_role_grants_for_user_and_project_404(self):
+        self.skipTest('Blocked by bug 1101287')
+
+    def test_list_projects_for_user_with_grants(self):
+        self.skipTest('Blocked by bug 1221805')
+
+    def test_get_roles_for_user_and_project_user_group_same_id(self):
+        self.skipTest('N/A: We never generate the same ID for a user and '
+                      'group in our mapping table')
+
+    def test_user_id_comma(self):
+        self.skipTest('Only valid if it is guaranteed to be talking to '
+                      'the fakeldap backend')
+
+    def test_user_id_comma_grants(self):
+        self.skipTest('Only valid if it is guaranteed to be talking to '
+                      'the fakeldap backend')
+
+    def test_user_enabled_ignored_disable_error(self):
+        # Override.
+        self.skipTest("Doesn't apply since LDAP config has no effect on the "
+                      "SQL identity backend.")
+
+    def test_group_enabled_ignored_disable_error(self):
+        # Override.
+        self.skipTest("Doesn't apply since LDAP config has no effect on the "
+                      "SQL identity backend.")
+
+    def test_project_enabled_ignored_disable_error(self):
+        # Override
+        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
+                      "SQL assignment backend.")
+
+
+class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
+    """Class to test simplest use of domain-specific SQL driver.
+
+    The simplest use of an SQL domain-specific backend is when it is used to
+    augment the standard case when LDAP is the default driver defined in the
+    main config file. This would allow, for example, service users to be
+    stored in SQL while LDAP handles the rest. Hence we define:
+
+    - The default driver uses the LDAP backend for the default domain
+    - A separate SQL backend for domain1
+
+    """
+    def initial_setup(self):
+        # We aren't setting up any initial data ahead of switching to
+        # domain-specific operation, so make the switch straight away.
+        self.config_fixture.config(
+            group='identity', domain_specific_drivers_enabled=True,
+            domain_config_dir=(
+                tests.TESTCONF + '/domain_configs_default_ldap_one_sql'))
+        # Part of the testing counts how many new mappings get created as
+        # we create users, so ensure we are NOT using mapping for the
+        # default LDAP domain, to avoid skewing that calculation.
+        self.config_fixture.config(group='identity_mapping',
+                                   backward_compatible_ids=True)
+
+        self.load_backends()
+
+        self.engine = sql.get_engine()
+        self.addCleanup(sql.cleanup)
+
+        sql.ModelBase.metadata.create_all(bind=self.engine)
+        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+
+        self.domain_count = 2
+        self.domain_specific_count = 1
+        self.setup_initial_domains()
+        self.users = {}
+
+        self.load_fixtures(default_fixtures)
+        self.create_users_across_domains()
+
+    def config_overrides(self):
+        super(DomainSpecificSQLIdentity, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+        self.config_fixture.config(
+            group='resource',
+            driver='keystone.resource.backends.sql.Resource')
+        self.config_fixture.config(
+            group='assignment',
+            driver='keystone.assignment.backends.sql.Assignment')
+
+    def get_config(self, domain_id):
+        if domain_id == CONF.identity.default_domain_id:
+            return CONF
+        else:
+            return self.identity_api.domain_configs.get_domain_conf(domain_id)
+
+    def reload_backends(self, domain_id):
+        if domain_id == CONF.identity.default_domain_id:
+            self.load_backends()
+        else:
+            # Just reload the driver for this domain - this will pick up
+            # any updated config
+            self.identity_api.domain_configs.reload_domain_driver(domain_id)
+
+    def test_default_sql_plus_sql_specific_driver_fails(self):
+        # First confirm that if LDAP is the default driver, domain1 can be
+        # loaded as SQL
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+        self.config_fixture.config(
+            group='assignment',
+            driver='keystone.assignment.backends.sql.Assignment')
+        self.load_backends()
+        # Make any identity call to initiate the lazy loading of configs
+        self.identity_api.list_users(
+            domain_scope=CONF.identity.default_domain_id)
+        self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))
+
+        # Now re-initialize, but with sql as the default identity driver
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.sql.Identity')
+        self.config_fixture.config(
+            group='assignment',
+            driver='keystone.assignment.backends.sql.Assignment')
+        self.load_backends()
+        # Make any identity call to initiate the lazy loading of configs,
+        # which should fail since we would now have two SQL drivers.
+        self.assertRaises(exception.MultipleSQLDriversInConfig,
+                          self.identity_api.list_users,
+                          domain_scope=CONF.identity.default_domain_id)
+
+    def test_multiple_sql_specific_drivers_fails(self):
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+        self.config_fixture.config(
+            group='assignment',
+            driver='keystone.assignment.backends.sql.Assignment')
+        self.load_backends()
+        # Ensure default, domain1 and domain2 exist
+        self.domain_count = 3
+        self.setup_initial_domains()
+        # Make any identity call to initiate the lazy loading of configs
+        self.identity_api.list_users(
+            domain_scope=CONF.identity.default_domain_id)
+        # This will only load domain1, since the domain2 config file is
+        # not stored in the same location
+        self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))
+
+        # Now try to manually load a second SQL-specific driver, for
+        # domain2, which should fail.
+        self.assertRaises(
+            exception.MultipleSQLDriversInConfig,
+            self.identity_api.domain_configs._load_config_from_file,
+            self.resource_api,
+            [tests.TESTCONF + '/domain_configs_one_extra_sql/' +
+             'keystone.domain2.conf'],
+            'domain2')
+
+
+class LdapFilterTests(test_backend.FilterTests, tests.TestCase):
+
+    def setUp(self):
+        super(LdapFilterTests, self).setUp()
+        self.useFixture(database.Database())
+        self.clear_database()
+
+        common_ldap.register_handler('fake://', fakeldap.FakeLdap)
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+        self.engine = sql.get_engine()
+        self.addCleanup(sql.cleanup)
+        sql.ModelBase.metadata.create_all(bind=self.engine)
+
+        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+        self.addCleanup(common_ldap_core._HANDLERS.clear)
+
+    def config_overrides(self):
+        super(LdapFilterTests, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+
+    def config_files(self):
+        config_files = super(LdapFilterTests, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
+        return config_files
+
+    def clear_database(self):
+        for shelf in fakeldap.FakeShelves:
+            fakeldap.FakeShelves[shelf].clear()
diff --git a/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py b/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py
new file mode 100644 (file)
index 0000000..eee03b8
--- /dev/null
@@ -0,0 +1,244 @@
+# -*- coding: utf-8 -*-
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ldappool
+import mock
+from oslo_config import cfg
+from oslotest import mockpatch
+
+from keystone.common.ldap import core as ldap_core
+from keystone.identity.backends import ldap
+from keystone.tests import unit as tests
+from keystone.tests.unit import fakeldap
+from keystone.tests.unit import test_backend_ldap
+
+CONF = cfg.CONF
+
+
+class LdapPoolCommonTestMixin(object):
+    """LDAP pool specific common tests used here and in live tests."""
+
+    def cleanup_pools(self):
+        ldap_core.PooledLDAPHandler.connection_pools.clear()
+
+    def test_handler_with_use_pool_enabled(self):
+        # by default, use_pool and use_auth_pool are enabled in the test
+        # pool config
+        user_ref = self.identity_api.get_user(self.user_foo['id'])
+        self.user_foo.pop('password')
+        self.assertDictEqual(user_ref, self.user_foo)
+
+        handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True)
+        self.assertIsInstance(handler, ldap_core.PooledLDAPHandler)
+
+    @mock.patch.object(ldap_core.KeystoneLDAPHandler, 'connect')
+    @mock.patch.object(ldap_core.KeystoneLDAPHandler, 'simple_bind_s')
+    def test_handler_with_use_pool_not_enabled(self, bind_method,
+                                               connect_method):
+        self.config_fixture.config(group='ldap', use_pool=False)
+        self.config_fixture.config(group='ldap', use_auth_pool=True)
+        self.cleanup_pools()
+
+        user_api = ldap.UserApi(CONF)
+        handler = user_api.get_connection(user=None, password=None,
+                                          end_user_auth=True)
+        # The use_auth_pool flag does not matter when use_pool is False;
+        # the handler is still the non-pooled version.
+        self.assertIsInstance(handler.conn, ldap_core.PythonLDAPHandler)
+
+    @mock.patch.object(ldap_core.KeystoneLDAPHandler, 'connect')
+    @mock.patch.object(ldap_core.KeystoneLDAPHandler, 'simple_bind_s')
+    def test_handler_with_end_user_auth_use_pool_not_enabled(self, bind_method,
+                                                             connect_method):
+        # use_pool is enabled by default in the test pool config; now
+        # disable the use_auth_pool flag to test the handler instance
+        self.config_fixture.config(group='ldap', use_auth_pool=False)
+        self.cleanup_pools()
+
+        user_api = ldap.UserApi(CONF)
+        handler = user_api.get_connection(user=None, password=None,
+                                          end_user_auth=True)
+        self.assertIsInstance(handler.conn, ldap_core.PythonLDAPHandler)
+
+        # When end_user_auth is False, the admin connections LDAP pool
+        # is used instead, so the handler is the pooled version.
+        handler = user_api.get_connection(user=None, password=None,
+                                          end_user_auth=False)
+        self.assertIsInstance(handler.conn, ldap_core.PooledLDAPHandler)
+
+    def test_pool_size_set(self):
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        self.assertEqual(CONF.ldap.pool_size, ldappool_cm.size)
+
+    def test_pool_retry_max_set(self):
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        self.assertEqual(CONF.ldap.pool_retry_max, ldappool_cm.retry_max)
+
+    def test_pool_retry_delay_set(self):
+        # make one identity call to initiate the LDAP connection, if it is
+        # not already established
+        self.identity_api.get_user(self.user_foo['id'])
+
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        self.assertEqual(CONF.ldap.pool_retry_delay, ldappool_cm.retry_delay)
+
+    def test_pool_use_tls_set(self):
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        self.assertEqual(CONF.ldap.use_tls, ldappool_cm.use_tls)
+
+    def test_pool_timeout_set(self):
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        self.assertEqual(CONF.ldap.pool_connection_timeout,
+                         ldappool_cm.timeout)
+
+    def test_pool_use_pool_set(self):
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        self.assertEqual(CONF.ldap.use_pool, ldappool_cm.use_pool)
+
+    def test_pool_connection_lifetime_set(self):
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        self.assertEqual(CONF.ldap.pool_connection_lifetime,
+                         ldappool_cm.max_lifetime)
+
+    def test_max_connection_error_raised(self):
+
+        who = CONF.ldap.user
+        cred = CONF.ldap.password
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        ldappool_cm.size = 2
+
+        # 3rd connection attempt should raise Max connection error
+        with ldappool_cm.connection(who, cred) as _:  # conn1
+            with ldappool_cm.connection(who, cred) as _:  # conn2
+                try:
+                    with ldappool_cm.connection(who, cred) as _:  # conn3
+                        _.unbind_s()
+                        self.fail()
+                except Exception as ex:
+                    self.assertIsInstance(ex,
+                                          ldappool.MaxConnectionReachedError)
+        ldappool_cm.size = CONF.ldap.pool_size
+
+    def test_pool_size_expands_correctly(self):
+
+        who = CONF.ldap.user
+        cred = CONF.ldap.password
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+        ldappool_cm.size = 3
+
+        def _get_conn():
+            return ldappool_cm.connection(who, cred)
+
+        # Open 3 connections first
+        with _get_conn() as _:  # conn1
+            self.assertEqual(len(ldappool_cm), 1)
+            with _get_conn() as _:  # conn2
+                self.assertEqual(len(ldappool_cm), 2)
+                with _get_conn() as _:  # conn3
+                    _.unbind_ext_s()
+                    self.assertEqual(len(ldappool_cm), 3)
+
+        # Then open 3 connections again and make sure size does not grow
+        # over 3
+        with _get_conn() as _:  # conn1
+            self.assertEqual(len(ldappool_cm), 1)
+            with _get_conn() as _:  # conn2
+                self.assertEqual(len(ldappool_cm), 2)
+                with _get_conn() as _:  # conn3
+                    _.unbind_ext_s()
+                    self.assertEqual(len(ldappool_cm), 3)
+
+    def test_password_change_with_pool(self):
+        old_password = self.user_sna['password']
+        self.cleanup_pools()
+
+        # authenticate so that connection is added to pool before password
+        # change
+        user_ref = self.identity_api.authenticate(
+            context={},
+            user_id=self.user_sna['id'],
+            password=self.user_sna['password'])
+
+        self.user_sna.pop('password')
+        self.user_sna['enabled'] = True
+        self.assertDictEqual(user_ref, self.user_sna)
+
+        new_password = 'new_password'
+        user_ref['password'] = new_password
+        self.identity_api.update_user(user_ref['id'], user_ref)
+
+        # now authenticate again to make sure the new password works with
+        # the connection pool
+        user_ref2 = self.identity_api.authenticate(
+            context={},
+            user_id=self.user_sna['id'],
+            password=new_password)
+
+        user_ref.pop('password')
+        self.assertDictEqual(user_ref, user_ref2)
+
+        # Authentication with the old password does not work here, as there
+        # is only one connection in the pool, which gets re-bound with the
+        # updated password - so no old bind is retained in this case.
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=self.user_sna['id'],
+                          password=old_password)
+
+
+class LdapIdentitySqlAssignment(LdapPoolCommonTestMixin,
+                                test_backend_ldap.LdapIdentitySqlAssignment,
+                                tests.TestCase):
+    '''Executes the base class's 150+ existing tests with the pooled LDAP
+    handler to make sure they all run without error.
+    '''
+    def setUp(self):
+        self.useFixture(mockpatch.PatchObject(
+            ldap_core.PooledLDAPHandler, 'Connector', fakeldap.FakeLdapPool))
+        super(LdapIdentitySqlAssignment, self).setUp()
+
+        self.addCleanup(self.cleanup_pools)
+        # store in a local variable to avoid long references
+        self.conn_pools = ldap_core.PooledLDAPHandler.connection_pools
+        # The superclass loads DB fixtures, which establishes the LDAP
+        # connection, so add a dummy call here to make the connection-pool
+        # initialization explicit - it is not strictly needed, but the side
+        # effect is otherwise easy to miss.
+        self.identity_api.get_user(self.user_foo['id'])
+
+    def config_files(self):
+        config_files = super(LdapIdentitySqlAssignment, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_ldap_pool.conf'))
+        return config_files
+
+    @mock.patch.object(ldap_core, 'utf8_encode')
+    def test_utf8_encoded_is_used_in_pool(self, mocked_method):
+        def side_effect(arg):
+            return arg
+        mocked_method.side_effect = side_effect
+        # invalidate the cache to get utf8_encode function called.
+        self.identity_api.get_user.invalidate(self.identity_api,
+                                              self.user_foo['id'])
+        self.identity_api.get_user(self.user_foo['id'])
+        mocked_method.assert_any_call(CONF.ldap.user)
+        mocked_method.assert_any_call(CONF.ldap.password)
diff --git a/keystone-moon/keystone/tests/unit/test_backend_rules.py b/keystone-moon/keystone/tests/unit/test_backend_rules.py
new file mode 100644 (file)
index 0000000..c9c4f15
--- /dev/null
@@ -0,0 +1,62 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_backend
+
+
+class RulesPolicy(tests.TestCase, test_backend.PolicyTests):
+    def setUp(self):
+        super(RulesPolicy, self).setUp()
+        self.load_backends()
+
+    def config_overrides(self):
+        super(RulesPolicy, self).config_overrides()
+        self.config_fixture.config(
+            group='policy',
+            driver='keystone.policy.backends.rules.Policy')
+
+    def test_create(self):
+        self.assertRaises(exception.NotImplemented,
+                          super(RulesPolicy, self).test_create)
+
+    def test_get(self):
+        self.assertRaises(exception.NotImplemented,
+                          super(RulesPolicy, self).test_get)
+
+    def test_list(self):
+        self.assertRaises(exception.NotImplemented,
+                          super(RulesPolicy, self).test_list)
+
+    def test_update(self):
+        self.assertRaises(exception.NotImplemented,
+                          super(RulesPolicy, self).test_update)
+
+    def test_delete(self):
+        self.assertRaises(exception.NotImplemented,
+                          super(RulesPolicy, self).test_delete)
+
+    def test_get_policy_404(self):
+        self.assertRaises(exception.NotImplemented,
+                          super(RulesPolicy, self).test_get_policy_404)
+
+    def test_update_policy_404(self):
+        self.assertRaises(exception.NotImplemented,
+                          super(RulesPolicy, self).test_update_policy_404)
+
+    def test_delete_policy_404(self):
+        self.assertRaises(exception.NotImplemented,
+                          super(RulesPolicy, self).test_delete_policy_404)
diff --git a/keystone-moon/keystone/tests/unit/test_backend_sql.py b/keystone-moon/keystone/tests/unit/test_backend_sql.py
new file mode 100644 (file)
index 0000000..a7c63bf
--- /dev/null
@@ -0,0 +1,948 @@
+# -*- coding: utf-8 -*-
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import uuid
+
+import mock
+from oslo_config import cfg
+from oslo_db import exception as db_exception
+from oslo_db import options
+import sqlalchemy
+from sqlalchemy import exc
+from testtools import matchers
+
+from keystone.common import driver_hints
+from keystone.common import sql
+from keystone import exception
+from keystone.identity.backends import sql as identity_sql
+from keystone.openstack.common import versionutils
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit import test_backend
+from keystone.token.persistence.backends import sql as token_sql
+
+
+CONF = cfg.CONF
+DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
+
+
+class SqlTests(tests.SQLDriverOverrides, tests.TestCase):
+
+    def setUp(self):
+        super(SqlTests, self).setUp()
+        self.useFixture(database.Database())
+        self.load_backends()
+
+        # populate the engine with tables & fixtures
+        self.load_fixtures(default_fixtures)
+        # defaulted by the data load
+        self.user_foo['enabled'] = True
+
+    def config_files(self):
+        config_files = super(SqlTests, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+        return config_files
+
+
+class SqlModels(SqlTests):
+
+    def select_table(self, name):
+        table = sqlalchemy.Table(name,
+                                 sql.ModelBase.metadata,
+                                 autoload=True)
+        s = sqlalchemy.select([table])
+        return s
+
+    def assertExpectedSchema(self, table, cols):
+        table = self.select_table(table)
+        for col, type_, length in cols:
+            self.assertIsInstance(table.c[col].type, type_)
+            if length:
+                self.assertEqual(length, table.c[col].type.length)
+
+    def test_user_model(self):
+        cols = (('id', sql.String, 64),
+                ('name', sql.String, 255),
+                ('password', sql.String, 128),
+                ('domain_id', sql.String, 64),
+                ('enabled', sql.Boolean, None),
+                ('extra', sql.JsonBlob, None))
+        self.assertExpectedSchema('user', cols)
+
+    def test_group_model(self):
+        cols = (('id', sql.String, 64),
+                ('name', sql.String, 64),
+                ('description', sql.Text, None),
+                ('domain_id', sql.String, 64),
+                ('extra', sql.JsonBlob, None))
+        self.assertExpectedSchema('group', cols)
+
+    def test_domain_model(self):
+        cols = (('id', sql.String, 64),
+                ('name', sql.String, 64),
+                ('enabled', sql.Boolean, None))
+        self.assertExpectedSchema('domain', cols)
+
+    def test_project_model(self):
+        cols = (('id', sql.String, 64),
+                ('name', sql.String, 64),
+                ('description', sql.Text, None),
+                ('domain_id', sql.String, 64),
+                ('enabled', sql.Boolean, None),
+                ('extra', sql.JsonBlob, None),
+                ('parent_id', sql.String, 64))
+        self.assertExpectedSchema('project', cols)
+
+    def test_role_assignment_model(self):
+        cols = (('type', sql.Enum, None),
+                ('actor_id', sql.String, 64),
+                ('target_id', sql.String, 64),
+                ('role_id', sql.String, 64),
+                ('inherited', sql.Boolean, False))
+        self.assertExpectedSchema('assignment', cols)
+
+    def test_user_group_membership(self):
+        cols = (('group_id', sql.String, 64),
+                ('user_id', sql.String, 64))
+        self.assertExpectedSchema('user_group_membership', cols)
+
+
+class SqlIdentity(SqlTests, test_backend.IdentityTests):
+    def test_password_hashed(self):
+        session = sql.get_session()
+        user_ref = self.identity_api._get_user(session, self.user_foo['id'])
+        self.assertNotEqual(user_ref['password'], self.user_foo['password'])
+
+    def test_delete_user_with_project_association(self):
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': uuid.uuid4().hex}
+        user = self.identity_api.create_user(user)
+        self.assignment_api.add_user_to_project(self.tenant_bar['id'],
+                                                user['id'])
+        self.identity_api.delete_user(user['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.assignment_api.list_projects_for_user,
+                          user['id'])
+
+    def test_create_null_user_name(self):
+        user = {'name': None,
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': uuid.uuid4().hex}
+        self.assertRaises(exception.ValidationError,
+                          self.identity_api.create_user,
+                          user)
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user_by_name,
+                          user['name'],
+                          DEFAULT_DOMAIN_ID)
+
+    def test_create_user_case_sensitivity(self):
+        # user name case sensitivity is down to the fact that it is marked as
+        # an SQL UNIQUE column, which may not be valid for other backends, like
+        # LDAP.
+
+        # create a ref with a lowercase name
+        ref = {
+            'name': uuid.uuid4().hex.lower(),
+            'domain_id': DEFAULT_DOMAIN_ID}
+        ref = self.identity_api.create_user(ref)
+
+        # assign a new ID with the same name, but this time in uppercase
+        ref['name'] = ref['name'].upper()
+        self.identity_api.create_user(ref)
+
+    def test_create_project_case_sensitivity(self):
+        # project name case sensitivity is down to the fact that it is marked
+        # as an SQL UNIQUE column, which may not be valid for other backends,
+        # like LDAP.
+
+        # create a ref with a lowercase name
+        ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex.lower(),
+            'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(ref['id'], ref)
+
+        # assign a new ID with the same name, but this time in uppercase
+        ref['id'] = uuid.uuid4().hex
+        ref['name'] = ref['name'].upper()
+        self.resource_api.create_project(ref['id'], ref)
+
+    def test_create_null_project_name(self):
+        tenant = {'id': uuid.uuid4().hex,
+                  'name': None,
+                  'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(exception.ValidationError,
+                          self.resource_api.create_project,
+                          tenant['id'],
+                          tenant)
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          tenant['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project_by_name,
+                          tenant['name'],
+                          DEFAULT_DOMAIN_ID)
+
+    def test_delete_project_with_user_association(self):
+        user = {'name': 'fakeuser',
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': 'passwd'}
+        user = self.identity_api.create_user(user)
+        self.assignment_api.add_user_to_project(self.tenant_bar['id'],
+                                                user['id'])
+        self.resource_api.delete_project(self.tenant_bar['id'])
+        tenants = self.assignment_api.list_projects_for_user(user['id'])
+        self.assertEqual([], tenants)
+
+    def test_metadata_removed_on_delete_user(self):
+        # A test to check that the internal representation
+        # of roles is correctly updated when a user is deleted
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': 'passwd'}
+        user = self.identity_api.create_user(user)
+        role = {'id': uuid.uuid4().hex,
+                'name': uuid.uuid4().hex}
+        self.role_api.create_role(role['id'], role)
+        self.assignment_api.add_role_to_user_and_project(
+            user['id'],
+            self.tenant_bar['id'],
+            role['id'])
+        self.identity_api.delete_user(user['id'])
+
+        # Now check whether the internal representation of roles
+        # has been deleted
+        self.assertRaises(exception.MetadataNotFound,
+                          self.assignment_api._get_metadata,
+                          user['id'],
+                          self.tenant_bar['id'])
+
+    def test_metadata_removed_on_delete_project(self):
+        # A test to check that the internal representation
+        # of roles is correctly updated when a project is deleted
+        user = {'name': uuid.uuid4().hex,
+                'domain_id': DEFAULT_DOMAIN_ID,
+                'password': 'passwd'}
+        user = self.identity_api.create_user(user)
+        role = {'id': uuid.uuid4().hex,
+                'name': uuid.uuid4().hex}
+        self.role_api.create_role(role['id'], role)
+        self.assignment_api.add_role_to_user_and_project(
+            user['id'],
+            self.tenant_bar['id'],
+            role['id'])
+        self.resource_api.delete_project(self.tenant_bar['id'])
+
+        # Now check whether the internal representation of roles
+        # has been deleted
+        self.assertRaises(exception.MetadataNotFound,
+                          self.assignment_api._get_metadata,
+                          user['id'],
+                          self.tenant_bar['id'])
+
+    def test_update_project_returns_extra(self):
+        """This tests for backwards-compatibility with an essex/folsom bug.
+
+        Non-indexed attributes were returned in an 'extra' attribute, instead
+        of on the entity itself; for consistency and backwards compatibility,
+        those attributes should be included twice.
+
+        This behavior is specific to the SQL driver.
+
+        """
+        tenant_id = uuid.uuid4().hex
+        arbitrary_key = uuid.uuid4().hex
+        arbitrary_value = uuid.uuid4().hex
+        tenant = {
+            'id': tenant_id,
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID,
+            arbitrary_key: arbitrary_value}
+        ref = self.resource_api.create_project(tenant_id, tenant)
+        self.assertEqual(arbitrary_value, ref[arbitrary_key])
+        self.assertIsNone(ref.get('extra'))
+
+        tenant['name'] = uuid.uuid4().hex
+        ref = self.resource_api.update_project(tenant_id, tenant)
+        self.assertEqual(arbitrary_value, ref[arbitrary_key])
+        self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
+
+    def test_update_user_returns_extra(self):
+        """This tests for backwards-compatibility with an essex/folsom bug.
+
+        Non-indexed attributes were returned in an 'extra' attribute, instead
+        of on the entity itself; for consistency and backwards compatibility,
+        those attributes should be included twice.
+
+        This behavior is specific to the SQL driver.
+
+        """
+        arbitrary_key = uuid.uuid4().hex
+        arbitrary_value = uuid.uuid4().hex
+        user = {
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'password': uuid.uuid4().hex,
+            arbitrary_key: arbitrary_value}
+        ref = self.identity_api.create_user(user)
+        self.assertEqual(arbitrary_value, ref[arbitrary_key])
+        self.assertIsNone(ref.get('password'))
+        self.assertIsNone(ref.get('extra'))
+
+        user['name'] = uuid.uuid4().hex
+        user['password'] = uuid.uuid4().hex
+        ref = self.identity_api.update_user(ref['id'], user)
+        self.assertIsNone(ref.get('password'))
+        self.assertIsNone(ref['extra'].get('password'))
+        self.assertEqual(arbitrary_value, ref[arbitrary_key])
+        self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
+
+    def test_sql_user_to_dict_null_default_project_id(self):
+        user = {
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID,
+            'password': uuid.uuid4().hex}
+
+        user = self.identity_api.create_user(user)
+        session = sql.get_session()
+        query = session.query(identity_sql.User)
+        query = query.filter_by(id=user['id'])
+        raw_user_ref = query.one()
+        self.assertIsNone(raw_user_ref.default_project_id)
+        user_ref = raw_user_ref.to_dict()
+        self.assertNotIn('default_project_id', user_ref)
+        session.close()
+
+    def test_list_domains_for_user(self):
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                'domain_id': domain['id'], 'enabled': True}
+
+        test_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(test_domain1['id'], test_domain1)
+        test_domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(test_domain2['id'], test_domain2)
+
+        user = self.identity_api.create_user(user)
+        user_domains = self.assignment_api.list_domains_for_user(user['id'])
+        self.assertEqual(0, len(user_domains))
+        self.assignment_api.create_grant(user_id=user['id'],
+                                         domain_id=test_domain1['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(user_id=user['id'],
+                                         domain_id=test_domain2['id'],
+                                         role_id=self.role_member['id'])
+        user_domains = self.assignment_api.list_domains_for_user(user['id'])
+        self.assertThat(user_domains, matchers.HasLength(2))
+
+    def test_list_domains_for_user_with_grants(self):
+        # Create two groups, each with a role on a different domain, and
+        # make the user a member of both groups.  Both of these new domains
+        # should now be included, along with any direct user grants.
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain['id'], domain)
+        user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                'domain_id': domain['id'], 'enabled': True}
+        user = self.identity_api.create_user(user)
+        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group1 = self.identity_api.create_group(group1)
+        group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+        group2 = self.identity_api.create_group(group2)
+
+        test_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(test_domain1['id'], test_domain1)
+        test_domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(test_domain2['id'], test_domain2)
+        test_domain3 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(test_domain3['id'], test_domain3)
+
+        self.identity_api.add_user_to_group(user['id'], group1['id'])
+        self.identity_api.add_user_to_group(user['id'], group2['id'])
+
+        # Create 3 grants, one user grant, the other two as group grants
+        self.assignment_api.create_grant(user_id=user['id'],
+                                         domain_id=test_domain1['id'],
+                                         role_id=self.role_member['id'])
+        self.assignment_api.create_grant(group_id=group1['id'],
+                                         domain_id=test_domain2['id'],
+                                         role_id=self.role_admin['id'])
+        self.assignment_api.create_grant(group_id=group2['id'],
+                                         domain_id=test_domain3['id'],
+                                         role_id=self.role_admin['id'])
+        user_domains = self.assignment_api.list_domains_for_user(user['id'])
+        self.assertThat(user_domains, matchers.HasLength(3))
+
+    def test_list_domains_for_user_with_inherited_grants(self):
+        """Test that inherited roles on the domain are excluded.
+
+        Test Plan:
+
+        - Create two domains, one user, group and role
+        - Domain1 is given an inherited user role, Domain2 an inherited
+          group role (for a group of which the user is a member)
+        - When listing domains for user, neither domain should be returned
+
+        """
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        domain1 = self.resource_api.create_domain(domain1['id'], domain1)
+        domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        domain2 = self.resource_api.create_domain(domain2['id'], domain2)
+        user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                'domain_id': domain1['id'], 'enabled': True}
+        user = self.identity_api.create_user(user)
+        group = {'name': uuid.uuid4().hex, 'domain_id': domain1['id']}
+        group = self.identity_api.create_group(group)
+        self.identity_api.add_user_to_group(user['id'], group['id'])
+        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role['id'], role)
+
+        # Create a grant on each domain, one user grant, one group grant,
+        # both inherited.
+        self.assignment_api.create_grant(user_id=user['id'],
+                                         domain_id=domain1['id'],
+                                         role_id=role['id'],
+                                         inherited_to_projects=True)
+        self.assignment_api.create_grant(group_id=group['id'],
+                                         domain_id=domain2['id'],
+                                         role_id=role['id'],
+                                         inherited_to_projects=True)
+
+        user_domains = self.assignment_api.list_domains_for_user(user['id'])
+        # No domains should be returned, since both domains have only
+        # inherited role assignments.
+        self.assertThat(user_domains, matchers.HasLength(0))
+
+
+class SqlTrust(SqlTests, test_backend.TrustTests):
+    pass
+
+
+class SqlToken(SqlTests, test_backend.TokenTests):
+    def test_token_revocation_list_uses_right_columns(self):
+        # This query used to be heavy with too many columns. We want
+        # to make sure it only selects the minimum columns necessary.
+
+        expected_query_args = (token_sql.TokenModel.id,
+                               token_sql.TokenModel.expires)
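+        # In other words, the driver is expected to build the revocation list
+        # with roughly session.query(TokenModel.id, TokenModel.expires)
+        # rather than session.query(TokenModel), which would select every
+        # column.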
+
+        with mock.patch.object(token_sql, 'sql') as mock_sql:
+            tok = token_sql.Token()
+            tok.list_revoked_tokens()
+
+        mock_query = mock_sql.get_session().query
+        mock_query.assert_called_with(*expected_query_args)
+
+    def test_flush_expired_tokens_batch(self):
+        # TODO(dstanek): This test should be rewritten to be less
+        # brittle. The code will likely need to be changed first. I
+        # just copied the spirit of the existing test when I rewrote
+        # mox -> mock. These tests are brittle because they have the
+        # call structure for SQLAlchemy encoded in them.
+
+        # test sqlite dialect
+        with mock.patch.object(token_sql, 'sql') as mock_sql:
+            mock_sql.get_session().bind.dialect.name = 'sqlite'
+            tok = token_sql.Token()
+            tok.flush_expired_tokens()
+
+        filter_mock = mock_sql.get_session().query().filter()
+        self.assertFalse(filter_mock.limit.called)
+        # NOTE: 'called_once' is not a real Mock attribute (accessing it just
+        # returns a child Mock, which is always truthy), so assert on the
+        # call count instead.
+        self.assertEqual(1, filter_mock.delete.call_count)
+
+    def test_flush_expired_tokens_batch_mysql(self):
+        # Test the mysql dialect. We don't need to test IBM DB SA separately,
+        # since other tests below cover the differences in how the two use
+        # the batch strategy.
+        with mock.patch.object(token_sql, 'sql') as mock_sql:
+            mock_sql.get_session().query().filter().delete.return_value = 0
+            mock_sql.get_session().bind.dialect.name = 'mysql'
+            tok = token_sql.Token()
+            expiry_mock = mock.Mock()
+            ITERS = [1, 2, 3]
+            expiry_mock.return_value = iter(ITERS)
+            token_sql._expiry_range_batched = expiry_mock
+            tok.flush_expired_tokens()
+
+            # The expiry strategy is only invoked once, the other calls are via
+            # the yield return.
+            self.assertEqual(1, expiry_mock.call_count)
+            mock_delete = mock_sql.get_session().query().filter().delete
+            self.assertThat(mock_delete.call_args_list,
+                            matchers.HasLength(len(ITERS)))
+
+    def test_expiry_range_batched(self):
+        upper_bound_mock = mock.Mock(side_effect=[1, "final value"])
+        sess_mock = mock.Mock()
+        query_mock = sess_mock.query().filter().order_by().offset().limit()
+        query_mock.one.side_effect = [['test'], sql.NotFound()]
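+        # The chained mock stands in for the SQLAlchemy call chain that the
+        # batched range helper is assumed to issue; the second .one() raising
+        # NotFound simulates an exhausted result set.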
+        for i, x in enumerate(token_sql._expiry_range_batched(sess_mock,
+                                                              upper_bound_mock,
+                                                              batch_size=50)):
+            if i == 0:
+                # The first time the batch iterator returns, it should return
+                # the first result that comes back from the database.
+                self.assertEqual(x, 'test')
+            elif i == 1:
+                # The second time, the database range function should return
+                # nothing, so the batch iterator returns the result of the
+                # upper_bound function
+                self.assertEqual(x, "final value")
+            else:
+                self.fail("range batch function returned more than twice")
+
+    def test_expiry_range_strategy_sqlite(self):
+        tok = token_sql.Token()
+        sqlite_strategy = tok._expiry_range_strategy('sqlite')
+        self.assertEqual(token_sql._expiry_range_all, sqlite_strategy)
+
+    def test_expiry_range_strategy_ibm_db_sa(self):
+        tok = token_sql.Token()
+        db2_strategy = tok._expiry_range_strategy('ibm_db_sa')
+        self.assertIsInstance(db2_strategy, functools.partial)
+        self.assertEqual(db2_strategy.func, token_sql._expiry_range_batched)
+        self.assertEqual(db2_strategy.keywords, {'batch_size': 100})
+
+    def test_expiry_range_strategy_mysql(self):
+        tok = token_sql.Token()
+        mysql_strategy = tok._expiry_range_strategy('mysql')
+        self.assertIsInstance(mysql_strategy, functools.partial)
+        self.assertEqual(mysql_strategy.func, token_sql._expiry_range_batched)
+        self.assertEqual(mysql_strategy.keywords, {'batch_size': 1000})
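+
+    # Summary of the dialect -> strategy mapping exercised by the three tests
+    # above:
+    #     sqlite    -> _expiry_range_all (flush everything in one go)
+    #     ibm_db_sa -> _expiry_range_batched with batch_size=100
+    #     mysql     -> _expiry_range_batched with batch_size=1000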
+
+
+class SqlCatalog(SqlTests, test_backend.CatalogTests):
+
+    _legacy_endpoint_id_in_endpoint = True
+    _enabled_default_to_true_when_creating_endpoint = True
+
+    def test_catalog_ignored_malformed_urls(self):
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service.copy())
+
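+        # Presumably '$(tenant)s' is malformed because 'tenant' is not a
+        # recognized substitution key (the templated form is e.g.
+        # '$(tenant_id)s'), so URL formatting fails and the endpoint is
+        # dropped from the catalog.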
+        malformed_url = "http://192.168.1.104:8774/v2/$(tenant)s"
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'region_id': None,
+            'service_id': service['id'],
+            'interface': 'public',
+            'url': malformed_url,
+        }
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
+
+        # NOTE(dstanek): there are no valid URLs, so nothing is in the catalog
+        catalog = self.catalog_api.get_catalog('fake-user', 'fake-tenant')
+        self.assertEqual({}, catalog)
+
+    def test_get_catalog_with_empty_public_url(self):
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service.copy())
+
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'region_id': None,
+            'interface': 'public',
+            'url': '',
+            'service_id': service['id'],
+        }
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
+
+        catalog = self.catalog_api.get_catalog('user', 'tenant')
+        catalog_endpoint = catalog[endpoint['region_id']][service['type']]
+        self.assertEqual(service['name'], catalog_endpoint['name'])
+        self.assertEqual(endpoint['id'], catalog_endpoint['id'])
+        self.assertEqual('', catalog_endpoint['publicURL'])
+        self.assertIsNone(catalog_endpoint.get('adminURL'))
+        self.assertIsNone(catalog_endpoint.get('internalURL'))
+
+    def test_create_endpoint_region_404(self):
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service.copy())
+
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'region_id': uuid.uuid4().hex,
+            'service_id': service['id'],
+            'interface': 'public',
+            'url': uuid.uuid4().hex,
+        }
+
+        self.assertRaises(exception.ValidationError,
+                          self.catalog_api.create_endpoint,
+                          endpoint['id'],
+                          endpoint.copy())
+
+    def test_create_region_invalid_id(self):
+        region = {
+            'id': '0' * 256,
+            'description': '',
+            'extra': {},
+        }
+
+        self.assertRaises(exception.StringLengthExceeded,
+                          self.catalog_api.create_region,
+                          region.copy())
+
+    def test_create_region_invalid_parent_id(self):
+        region = {
+            'id': uuid.uuid4().hex,
+            'parent_region_id': '0' * 256,
+        }
+
+        self.assertRaises(exception.RegionNotFound,
+                          self.catalog_api.create_region,
+                          region)
+
+    def test_delete_region_with_endpoint(self):
+        # create a region
+        region = {
+            'id': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_region(region)
+
+        # create a child region
+        child_region = {
+            'id': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'parent_region_id': region['id']
+        }
+        self.catalog_api.create_region(child_region)
+        # create a service
+        service = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(service['id'], service)
+
+        # create an endpoint attached to the service and child region
+        child_endpoint = {
+            'id': uuid.uuid4().hex,
+            'region_id': child_region['id'],
+            'interface': uuid.uuid4().hex[:8],
+            'url': uuid.uuid4().hex,
+            'service_id': service['id'],
+        }
+        self.catalog_api.create_endpoint(child_endpoint['id'], child_endpoint)
+        self.assertRaises(exception.RegionDeletionError,
+                          self.catalog_api.delete_region,
+                          child_region['id'])
+
+        # create an endpoint attached to the service and parent region
+        endpoint = {
+            'id': uuid.uuid4().hex,
+            'region_id': region['id'],
+            'interface': uuid.uuid4().hex[:8],
+            'url': uuid.uuid4().hex,
+            'service_id': service['id'],
+        }
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+        self.assertRaises(exception.RegionDeletionError,
+                          self.catalog_api.delete_region,
+                          region['id'])
+
+
+class SqlPolicy(SqlTests, test_backend.PolicyTests):
+    pass
+
+
+class SqlInheritance(SqlTests, test_backend.InheritanceTests):
+    pass
+
+
+class SqlTokenCacheInvalidation(SqlTests, test_backend.TokenCacheInvalidation):
+    def setUp(self):
+        super(SqlTokenCacheInvalidation, self).setUp()
+        self._create_test_data()
+
+
+class SqlFilterTests(SqlTests, test_backend.FilterTests):
+
+    def clean_up_entities(self):
+        """Clean up entity test data from Filter Test Cases."""
+
+        for entity in ['user', 'group', 'project']:
+            self._delete_test_data(entity, self.entity_list[entity])
+            self._delete_test_data(entity, self.domain1_entity_list[entity])
+        del self.entity_list
+        del self.domain1_entity_list
+        self.domain1['enabled'] = False
+        self.resource_api.update_domain(self.domain1['id'], self.domain1)
+        self.resource_api.delete_domain(self.domain1['id'])
+        del self.domain1
+
+    def test_list_entities_filtered_by_domain(self):
+        # NOTE(henry-nash): This method is here rather than in test_backend
+        # since any domain filtering with LDAP is handled by the manager
+        # layer (and is already tested elsewhere) not at the driver level.
+        self.addCleanup(self.clean_up_entities)
+        self.domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(self.domain1['id'], self.domain1)
+
+        self.entity_list = {}
+        self.domain1_entity_list = {}
+        for entity in ['user', 'group', 'project']:
+            # Create 5 entities, 3 of which are in domain1
+            DOMAIN1_ENTITIES = 3
+            self.entity_list[entity] = self._create_test_data(entity, 2)
+            self.domain1_entity_list[entity] = self._create_test_data(
+                entity, DOMAIN1_ENTITIES, self.domain1['id'])
+
+            # Should get back the DOMAIN1_ENTITIES in domain1
+            hints = driver_hints.Hints()
+            hints.add_filter('domain_id', self.domain1['id'])
+            entities = self._list_entities(entity)(hints=hints)
+            self.assertEqual(DOMAIN1_ENTITIES, len(entities))
+            self._match_with_list(entities, self.domain1_entity_list[entity])
+            # Check the driver has removed the filter from the list hints
+            self.assertFalse(hints.get_exact_filter_by_name('domain_id'))
+
+    def test_filter_sql_injection_attack(self):
+        """Test against sql injection attack on filters
+
+        Test Plan:
+        - Attempt to get all entities back by passing a two-term attribute
+        - Attempt to piggyback filter to damage DB (e.g. drop table)
+
+        """
+        # Check we have some users
+        users = self.identity_api.list_users()
+        self.assertTrue(len(users) > 0)
+
+        hints = driver_hints.Hints()
+        hints.add_filter('name', "anything' or 'x'='x")
+        users = self.identity_api.list_users(hints=hints)
+        self.assertEqual(0, len(users))
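+        # The filter value is passed to SQLAlchemy as a bound parameter, so
+        # the quote/OR payload is matched literally rather than interpreted
+        # as SQL, and no rows come back.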
+
+        # See if we can add a SQL command...use the group table instead of the
+        # user table, since 'user' is a reserved word for SQLAlchemy.
+        group = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID}
+        group = self.identity_api.create_group(group)
+
+        hints = driver_hints.Hints()
+        hints.add_filter('name', "x'; drop table group")
+        groups = self.identity_api.list_groups(hints=hints)
+        self.assertEqual(0, len(groups))
+
+        groups = self.identity_api.list_groups()
+        self.assertTrue(len(groups) > 0)
+
+    def test_groups_for_user_filtered(self):
+        # The SQL identity driver currently does not support filtering when
+        # listing groups for a given user, so it will fail this test. This is
+        # raised as bug #1412447.
+        try:
+            super(SqlFilterTests, self).test_groups_for_user_filtered()
+        except matchers.MismatchError:
+            return
+        # We shouldn't get here...if we do, it means someone has fixed the
+        # above defect, so we can remove this test override. As an aside, it
+        # would be nice to have used self.assertRaises() around the call above
+        # to achieve the logic here...but that does not seem to work when
+        # wrapping another assert (it won't seem to catch the error).
+        self.fail()
+
+
+class SqlLimitTests(SqlTests, test_backend.LimitTests):
+    def setUp(self):
+        super(SqlLimitTests, self).setUp()
+        test_backend.LimitTests.setUp(self)
+
+
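+# Minimal model whose methods raise raw driver exceptions (an oslo.db
+# DBDuplicateEntry, a DBError wrapping a SQLAlchemy IntegrityError, and a
+# plain KeyError) so that SqlDecorators below can check which of these
+# sql.handle_conflicts translates into keystone exceptions and which it
+# lets through.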
+class FakeTable(sql.ModelBase):
+    __tablename__ = 'test_table'
+    col = sql.Column(sql.String(32), primary_key=True)
+
+    @sql.handle_conflicts('keystone')
+    def insert(self):
+        raise db_exception.DBDuplicateEntry
+
+    @sql.handle_conflicts('keystone')
+    def update(self):
+        raise db_exception.DBError(
+            inner_exception=exc.IntegrityError('a', 'a', 'a'))
+
+    @sql.handle_conflicts('keystone')
+    def lookup(self):
+        raise KeyError
+
+
+class SqlDecorators(tests.TestCase):
+
+    def test_initialization_fail(self):
+        self.assertRaises(exception.StringLengthExceeded,
+                          FakeTable, col='a' * 64)
+
+    def test_initialization(self):
+        tt = FakeTable(col='a')
+        self.assertEqual('a', tt.col)
+
+    def test_non_ascii_init(self):
+        # NOTE(I159): Non ASCII characters must cause UnicodeDecodeError
+        # if encoding is not provided explicitly.
+        self.assertRaises(UnicodeDecodeError, FakeTable, col='Я')
+
+    def test_conflict_happened(self):
+        self.assertRaises(exception.Conflict, FakeTable().insert)
+        self.assertRaises(exception.UnexpectedError, FakeTable().update)
+
+    def test_not_conflict_error(self):
+        self.assertRaises(KeyError, FakeTable().lookup)
+
+
+class SqlModuleInitialization(tests.TestCase):
+
+    @mock.patch.object(sql.core, 'CONF')
+    @mock.patch.object(options, 'set_defaults')
+    def test_initialize_module(self, set_defaults, CONF):
+        sql.initialize()
+        set_defaults.assert_called_with(CONF,
+                                        connection='sqlite:///keystone.db')
+
+
+class SqlCredential(SqlTests):
+
+    def _create_credential_with_user_id(self, user_id=None):
+        # NOTE: a uuid.uuid4().hex default in the signature would be evaluated
+        # only once, at definition time, so generate a fresh id per call here.
+        if user_id is None:
+            user_id = uuid.uuid4().hex
+        credential_id = uuid.uuid4().hex
+        new_credential = {
+            'id': credential_id,
+            'user_id': user_id,
+            'project_id': uuid.uuid4().hex,
+            'blob': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'extra': uuid.uuid4().hex
+        }
+        self.credential_api.create_credential(credential_id, new_credential)
+        return new_credential
+
+    def _validateCredentialList(self, retrieved_credentials,
+                                expected_credentials):
+        self.assertEqual(len(expected_credentials), len(retrieved_credentials))
+        retrieved_ids = [c['id'] for c in retrieved_credentials]
+        for cred in expected_credentials:
+            self.assertIn(cred['id'], retrieved_ids)
+
+    def setUp(self):
+        super(SqlCredential, self).setUp()
+        self.credentials = []
+        for _ in range(3):
+            self.credentials.append(
+                self._create_credential_with_user_id())
+        self.user_credentials = []
+        for _ in range(3):
+            cred = self._create_credential_with_user_id(self.user_foo['id'])
+            self.user_credentials.append(cred)
+            self.credentials.append(cred)
+
+    def test_list_credentials(self):
+        credentials = self.credential_api.list_credentials()
+        self._validateCredentialList(credentials, self.credentials)
+        # test filtering using hints
+        hints = driver_hints.Hints()
+        hints.add_filter('user_id', self.user_foo['id'])
+        credentials = self.credential_api.list_credentials(hints)
+        self._validateCredentialList(credentials, self.user_credentials)
+
+    def test_list_credentials_for_user(self):
+        credentials = self.credential_api.list_credentials_for_user(
+            self.user_foo['id'])
+        self._validateCredentialList(credentials, self.user_credentials)
+
+
+class DeprecatedDecorators(SqlTests):
+
+    def test_assignment_to_role_api(self):
+        """Test that calling one of the methods does call LOG.deprecated.
+
+        This method is really generic to the type of backend, but we need
+        one to execute the test, so the SQL backend is as good as any.
+
+        """
+
+        # Rather than try to check that a log message is issued, we
+        # enable fatal_deprecations so that we can check for the
+        # raised exception.
+
+        # First try to create a role without enabling fatal deprecations,
+        # which should work thanks to the deprecated cross-manager calls.
+        role_ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex}
+        self.assignment_api.create_role(role_ref['id'], role_ref)
+        self.role_api.get_role(role_ref['id'])
+
+        # Now enable fatal exceptions - creating a role by calling the
+        # old manager should now fail.
+        self.config_fixture.config(fatal_deprecations=True)
+        role_ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex}
+        self.assertRaises(versionutils.DeprecatedConfig,
+                          self.assignment_api.create_role,
+                          role_ref['id'], role_ref)
+
+    def test_assignment_to_resource_api(self):
+        """Test that calling one of the methods does call LOG.deprecated.
+
+        This method is really generic to the type of backend, but we need
+        one to execute the test, so the SQL backend is as good as any.
+
+        """
+
+        # Rather than try to check that a log message is issued, we
+        # enable fatal_deprecations so that we can check for the
+        # raised exception.
+
+        # First try to create a project without enabling fatal deprecations,
+        # which should work thanks to the deprecated cross-manager calls.
+        project_ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID}
+        self.resource_api.create_project(project_ref['id'], project_ref)
+        self.resource_api.get_project(project_ref['id'])
+
+        # Now enable fatal exceptions - creating a project by calling the
+        # old manager should now fail.
+        self.config_fixture.config(fatal_deprecations=True)
+        project_ref = {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'domain_id': DEFAULT_DOMAIN_ID}
+        self.assertRaises(versionutils.DeprecatedConfig,
+                          self.assignment_api.create_project,
+                          project_ref['id'], project_ref)
diff --git a/keystone-moon/keystone/tests/unit/test_backend_templated.py b/keystone-moon/keystone/tests/unit/test_backend_templated.py
new file mode 100644 (file)
index 0000000..a1c15fb
--- /dev/null
@@ -0,0 +1,127 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import uuid
+
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit import test_backend
+
+
+DEFAULT_CATALOG_TEMPLATES = os.path.abspath(os.path.join(
+    os.path.dirname(__file__),
+    'default_catalog.templates'))
+
+
+class TestTemplatedCatalog(tests.TestCase, test_backend.CatalogTests):
+
+    DEFAULT_FIXTURE = {
+        'RegionOne': {
+            'compute': {
+                'adminURL': 'http://localhost:8774/v1.1/bar',
+                'publicURL': 'http://localhost:8774/v1.1/bar',
+                'internalURL': 'http://localhost:8774/v1.1/bar',
+                'name': "'Compute Service'",
+                'id': '2'
+            },
+            'identity': {
+                'adminURL': 'http://localhost:35357/v2.0',
+                'publicURL': 'http://localhost:5000/v2.0',
+                'internalURL': 'http://localhost:35357/v2.0',
+                'name': "'Identity Service'",
+                'id': '1'
+            }
+        }
+    }
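+
+    # This fixture is assumed to mirror the endpoints defined in the
+    # default_catalog.templates file that config_overrides() below points the
+    # catalog driver at.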
+
+    def setUp(self):
+        super(TestTemplatedCatalog, self).setUp()
+        self.useFixture(database.Database())
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+
+    def config_overrides(self):
+        super(TestTemplatedCatalog, self).config_overrides()
+        self.config_fixture.config(group='catalog',
+                                   template_file=DEFAULT_CATALOG_TEMPLATES)
+
+    def test_get_catalog(self):
+        catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
+        self.assertDictEqual(catalog_ref, self.DEFAULT_FIXTURE)
+
+    def test_catalog_ignored_malformed_urls(self):
+        # both endpoints are in the catalog
+        catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
+        self.assertEqual(2, len(catalog_ref['RegionOne']))
+
+        region = self.catalog_api.driver.templates['RegionOne']
+        region['compute']['adminURL'] = 'http://localhost:8774/v1.1/$(tenant)s'
+
+        # the malformed one has been removed
+        catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
+        self.assertEqual(1, len(catalog_ref['RegionOne']))
+
+    def test_get_catalog_endpoint_disabled(self):
+        self.skipTest("Templated backend doesn't have disabled endpoints")
+
+    def test_get_v3_catalog_endpoint_disabled(self):
+        self.skipTest("Templated backend doesn't have disabled endpoints")
+
+    def assert_catalogs_equal(self, expected, observed):
+        for e, o in zip(sorted(expected), sorted(observed)):
+            expected_endpoints = e.pop('endpoints')
+            observed_endpoints = o.pop('endpoints')
+            self.assertDictEqual(e, o)
+            self.assertItemsEqual(expected_endpoints, observed_endpoints)
+
+    def test_get_v3_catalog(self):
+        user_id = uuid.uuid4().hex
+        project_id = uuid.uuid4().hex
+        catalog_ref = self.catalog_api.get_v3_catalog(user_id, project_id)
+        exp_catalog = [
+            {'endpoints': [
+                {'interface': 'admin',
+                 'region': 'RegionOne',
+                 'url': 'http://localhost:8774/v1.1/%s' % project_id},
+                {'interface': 'public',
+                 'region': 'RegionOne',
+                 'url': 'http://localhost:8774/v1.1/%s' % project_id},
+                {'interface': 'internal',
+                 'region': 'RegionOne',
+                 'url': 'http://localhost:8774/v1.1/%s' % project_id}],
+             'type': 'compute',
+             'name': "'Compute Service'",
+             'id': '2'},
+            {'endpoints': [
+                {'interface': 'admin',
+                 'region': 'RegionOne',
+                 'url': 'http://localhost:35357/v2.0'},
+                {'interface': 'public',
+                 'region': 'RegionOne',
+                 'url': 'http://localhost:5000/v2.0'},
+                {'interface': 'internal',
+                 'region': 'RegionOne',
+                 'url': 'http://localhost:35357/v2.0'}],
+             'type': 'identity',
+             'name': "'Identity Service'",
+             'id': '1'}]
+        self.assert_catalogs_equal(exp_catalog, catalog_ref)
+
+    def test_list_regions_filtered_by_parent_region_id(self):
+        self.skipTest('Templated backend does not support hints')
+
+    def test_service_filtering(self):
+        self.skipTest("Templated backend doesn't support filtering")
diff --git a/keystone-moon/keystone/tests/unit/test_cache.py b/keystone-moon/keystone/tests/unit/test_cache.py
new file mode 100644 (file)
index 0000000..5a778a0
--- /dev/null
@@ -0,0 +1,322 @@
+# Copyright 2013 Metacloud
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import time
+import uuid
+
+from dogpile.cache import api
+from dogpile.cache import proxy
+import mock
+from oslo_config import cfg
+
+from keystone.common import cache
+from keystone import exception
+from keystone.tests import unit as tests
+
+
+CONF = cfg.CONF
+NO_VALUE = api.NO_VALUE
+
+
+def _copy_value(value):
+    if value is not NO_VALUE:
+        value = copy.deepcopy(value)
+    return value
+
+
+# NOTE(morganfainberg): WARNING - It is not recommended to use the Memory
+# backend for dogpile.cache in a real deployment under any circumstances. The
+# backend does no cleanup of expired values and therefore will leak memory. The
+# backend is not implemented in a way to share data across processes (e.g.
+# Keystone in HTTPD).  This proxy is a hack to get around the lack of isolation
+# of values in memory.  Currently it blindly stores and retrieves the values
+# from the cache, and modifications to the dicts/lists/etc. returned can result
+# in changes to the cached values.  In short, do not use the
+# dogpile.cache.memory backend unless you are running tests or expecting
+# odd/strange results.
+class CacheIsolatingProxy(proxy.ProxyBackend):
+    """Proxy that forces a memory copy of stored values.
+    The default in-memory cache-region does not perform a copy on values it
+    is meant to cache.  Therefore if the value is modified after set or after
+    get, the cached value also is modified.  This proxy does a copy as the last
+    thing before storing data.
+    """
+    def get(self, key):
+        return _copy_value(self.proxied.get(key))
+
+    def set(self, key, value):
+        self.proxied.set(key, _copy_value(value))
+
+
+class TestProxy(proxy.ProxyBackend):
+    def get(self, key):
+        value = _copy_value(self.proxied.get(key))
+        if value is not NO_VALUE:
+            if isinstance(value[0], TestProxyValue):
+                value[0].cached = True
+        return value
+
+
+class TestProxyValue(object):
+    def __init__(self, value):
+        self.value = value
+        self.cached = False
+
+
+class CacheRegionTest(tests.TestCase):
+
+    def setUp(self):
+        super(CacheRegionTest, self).setUp()
+        self.region = cache.make_region()
+        cache.configure_cache_region(self.region)
+        self.region.wrap(TestProxy)
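+        # Every get() on self.region now passes through TestProxy, which
+        # copies values and flags TestProxyValue instances as cached.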
+        self.test_value = TestProxyValue('Decorator Test')
+
+    def _add_test_caching_option(self):
+        self.config_fixture.register_opt(
+            cfg.BoolOpt('caching', default=True), group='cache')
+
+    def _get_cacheable_function(self):
+        with mock.patch.object(cache.REGION, 'cache_on_arguments',
+                               self.region.cache_on_arguments):
+            memoize = cache.get_memoization_decorator(section='cache')
+
+            @memoize
+            def cacheable_function(value):
+                return value
+
+        return cacheable_function
+
+    def test_region_built_with_proxy_direct_cache_test(self):
+        # Verify cache regions are properly built with proxies.
+        test_value = TestProxyValue('Direct Cache Test')
+        self.region.set('cache_test', test_value)
+        cached_value = self.region.get('cache_test')
+        self.assertTrue(cached_value.cached)
+
+    def test_cache_region_no_error_multiple_config(self):
+        # Verify configuring the CacheRegion again doesn't error.
+        cache.configure_cache_region(self.region)
+        cache.configure_cache_region(self.region)
+
+    def _get_cache_fallthrough_fn(self, cache_time):
+        with mock.patch.object(cache.REGION, 'cache_on_arguments',
+                               self.region.cache_on_arguments):
+            memoize = cache.get_memoization_decorator(
+                section='cache',
+                expiration_section='assignment')
+
+            class _test_obj(object):
+                def __init__(self, value):
+                    self.test_value = value
+
+                @memoize
+                def get_test_value(self):
+                    return self.test_value
+
+            def _do_test(value):
+
+                test_obj = _test_obj(value)
+
+                # Ensure the value has been cached
+                test_obj.get_test_value()
+                # Get the now cached value
+                cached_value = test_obj.get_test_value()
+                self.assertTrue(cached_value.cached)
+                self.assertEqual(value.value, cached_value.value)
+                self.assertEqual(cached_value.value, test_obj.test_value.value)
+                # Change the underlying value on the test object.
+                test_obj.test_value = TestProxyValue(uuid.uuid4().hex)
+                self.assertEqual(cached_value.value,
+                                 test_obj.get_test_value().value)
+                # override the system time to ensure the non-cached new value
+                # is returned
+                new_time = time.time() + (cache_time * 2)
+                with mock.patch.object(time, 'time',
+                                       return_value=new_time):
+                    overridden_cache_value = test_obj.get_test_value()
+                    self.assertNotEqual(cached_value.value,
+                                        overridden_cache_value.value)
+                    self.assertEqual(test_obj.test_value.value,
+                                     overridden_cache_value.value)
+
+        return _do_test
+
+    def test_cache_no_fallthrough_expiration_time_fn(self):
+        # Since we do not re-configure the cache region, for ease of testing
+        # this value is set the same as the expiration_time default in the
+        # [cache] section
+        cache_time = 600
+        expiration_time = cache.get_expiration_time_fn('role')
+        do_test = self._get_cache_fallthrough_fn(cache_time)
+        # Run the test with the assignment cache_time value
+        self.config_fixture.config(cache_time=cache_time,
+                                   group='role')
+        test_value = TestProxyValue(uuid.uuid4().hex)
+        self.assertEqual(cache_time, expiration_time())
+        do_test(value=test_value)
+
+    def test_cache_fallthrough_expiration_time_fn(self):
+        # Since we do not re-configure the cache region, for ease of testing
+        # this value is set the same as the expiration_time default in the
+        # [cache] section
+        cache_time = 599
+        expiration_time = cache.get_expiration_time_fn('role')
+        do_test = self._get_cache_fallthrough_fn(cache_time)
+        # Run the test with the assignment cache_time value set to None and
+        # the global value set.
+        self.config_fixture.config(cache_time=None, group='role')
+        test_value = TestProxyValue(uuid.uuid4().hex)
+        self.assertIsNone(expiration_time())
+        do_test(value=test_value)
+
+    def test_should_cache_fn_global_cache_enabled(self):
+        # Verify should_cache_fn generates a sane function for subsystem and
+        # functions as expected with caching globally enabled.
+        cacheable_function = self._get_cacheable_function()
+
+        self.config_fixture.config(group='cache', enabled=True)
+        cacheable_function(self.test_value)
+        cached_value = cacheable_function(self.test_value)
+        self.assertTrue(cached_value.cached)
+
+    def test_should_cache_fn_global_cache_disabled(self):
+        # Verify should_cache_fn generates a sane function for subsystem and
+        # functions as expected with caching globally disabled.
+        cacheable_function = self._get_cacheable_function()
+
+        self.config_fixture.config(group='cache', enabled=False)
+        cacheable_function(self.test_value)
+        cached_value = cacheable_function(self.test_value)
+        self.assertFalse(cached_value.cached)
+
+    def test_should_cache_fn_global_cache_disabled_section_cache_enabled(self):
+        # Verify should_cache_fn generates a sane function for subsystem and
+        # functions as expected with caching globally disabled and the specific
+        # section caching enabled.
+        cacheable_function = self._get_cacheable_function()
+
+        self._add_test_caching_option()
+        self.config_fixture.config(group='cache', enabled=False)
+        self.config_fixture.config(group='cache', caching=True)
+
+        cacheable_function(self.test_value)
+        cached_value = cacheable_function(self.test_value)
+        self.assertFalse(cached_value.cached)
+
+    def test_should_cache_fn_global_cache_enabled_section_cache_disabled(self):
+        # Verify should_cache_fn generates a sane function for subsystem and
+        # functions as expected with caching globally enabled and the specific
+        # section caching disabled.
+        cacheable_function = self._get_cacheable_function()
+
+        self._add_test_caching_option()
+        self.config_fixture.config(group='cache', enabled=True)
+        self.config_fixture.config(group='cache', caching=False)
+
+        cacheable_function(self.test_value)
+        cached_value = cacheable_function(self.test_value)
+        self.assertFalse(cached_value.cached)
+
+    def test_should_cache_fn_global_cache_enabled_section_cache_enabled(self):
+        # Verify should_cache_fn generates a sane function for subsystem and
+        # functions as expected with caching globally enabled and the specific
+        # section caching enabled.
+        cacheable_function = self._get_cacheable_function()
+
+        self._add_test_caching_option()
+        self.config_fixture.config(group='cache', enabled=True)
+        self.config_fixture.config(group='cache', caching=True)
+
+        cacheable_function(self.test_value)
+        cached_value = cacheable_function(self.test_value)
+        self.assertTrue(cached_value.cached)
+
+    def test_cache_dictionary_config_builder(self):
+        """Validate we build a sane dogpile.cache dictionary config."""
+        self.config_fixture.config(group='cache',
+                                   config_prefix='test_prefix',
+                                   backend='some_test_backend',
+                                   expiration_time=86400,
+                                   backend_argument=['arg1:test',
+                                                     'arg2:test:test',
+                                                     'arg3.invalid'])
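+        # 'argN:value' entries should surface as '<prefix>.arguments.argN'
+        # keys, while 'arg3.invalid' lacks the ':' separator and is expected
+        # to be dropped (all asserted below).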
+
+        config_dict = cache.build_cache_config()
+        self.assertEqual(
+            CONF.cache.backend, config_dict['test_prefix.backend'])
+        self.assertEqual(
+            CONF.cache.expiration_time,
+            config_dict['test_prefix.expiration_time'])
+        self.assertEqual('test', config_dict['test_prefix.arguments.arg1'])
+        self.assertEqual('test:test',
+                         config_dict['test_prefix.arguments.arg2'])
+        self.assertNotIn('test_prefix.arguments.arg3', config_dict)
+
+    def test_cache_debug_proxy(self):
+        single_value = 'Test Value'
+        single_key = 'testkey'
+        multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
+
+        self.region.set(single_key, single_value)
+        self.assertEqual(single_value, self.region.get(single_key))
+
+        self.region.delete(single_key)
+        self.assertEqual(NO_VALUE, self.region.get(single_key))
+
+        self.region.set_multi(multi_values)
+        cached_values = self.region.get_multi(multi_values.keys())
+        for value in multi_values.values():
+            self.assertIn(value, cached_values)
+        self.assertEqual(len(multi_values.values()), len(cached_values))
+
+        self.region.delete_multi(multi_values.keys())
+        for value in self.region.get_multi(multi_values.keys()):
+            self.assertEqual(NO_VALUE, value)
+
+    def test_configure_non_region_object_raises_error(self):
+        self.assertRaises(exception.ValidationError,
+                          cache.configure_cache_region,
+                          "bogus")
+
+
+class CacheNoopBackendTest(tests.TestCase):
+
+    def setUp(self):
+        super(CacheNoopBackendTest, self).setUp()
+        self.region = cache.make_region()
+        cache.configure_cache_region(self.region)
+
+    def config_overrides(self):
+        super(CacheNoopBackendTest, self).config_overrides()
+        self.config_fixture.config(group='cache',
+                                   backend='keystone.common.cache.noop')
+
+    def test_noop_backend(self):
+        single_value = 'Test Value'
+        single_key = 'testkey'
+        multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
+
+        self.region.set(single_key, single_value)
+        self.assertEqual(NO_VALUE, self.region.get(single_key))
+
+        self.region.set_multi(multi_values)
+        cached_values = self.region.get_multi(multi_values.keys())
+        self.assertEqual(len(cached_values), len(multi_values.values()))
+        for value in cached_values:
+            self.assertEqual(NO_VALUE, value)
+
+        # Delete should not raise exceptions
+        self.region.delete(single_key)
+        self.region.delete_multi(multi_values.keys())
diff --git a/keystone-moon/keystone/tests/unit/test_cache_backend_mongo.py b/keystone-moon/keystone/tests/unit/test_cache_backend_mongo.py
new file mode 100644 (file)
index 0000000..a56bf75
--- /dev/null
@@ -0,0 +1,727 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import copy
+import functools
+import uuid
+
+from dogpile.cache import api
+from dogpile.cache import region as dp_region
+import six
+
+from keystone.common.cache.backends import mongo
+from keystone import exception
+from keystone.tests import unit as tests
+
+
+# Mock database structure sample where 'ks_cache' is database and
+# 'cache' is collection. Dogpile CachedValue data is divided in two
+# fields `value` (CachedValue.payload) and `meta` (CachedValue.metadata)
+ks_cache = {
+    "cache": [
+        {
+            "value": {
+                "serviceType": "identity",
+                "allVersionsUrl": "https://dummyUrl",
+                "dateLastModified": "ISODDate(2014-02-08T18:39:13.237Z)",
+                "serviceName": "Identity",
+                "enabled": "True"
+            },
+            "meta": {
+                "v": 1,
+                "ct": 1392371422.015121
+            },
+            "doc_date": "ISODate('2014-02-14T09:50:22.015Z')",
+            "_id": "8251dc95f63842719c077072f1047ddf"
+        },
+        {
+            "value": "dummyValueX",
+            "meta": {
+                "v": 1,
+                "ct": 1392371422.014058
+            },
+            "doc_date": "ISODate('2014-02-14T09:50:22.014Z')",
+            "_id": "66730b9534d146f0804d23729ad35436"
+        }
+    ]
+}
+
+
+COLLECTIONS = {}
+SON_MANIPULATOR = None
+
+
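+# The mock classes below stand in for the small slice of the pymongo API that
+# the mongo cache backend uses: cursor iteration with skip/limit, basic
+# collection CRUD, SON manipulators, and client/database attribute access.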
+class MockCursor(object):
+
+    def __init__(self, collection, dataset_factory):
+        super(MockCursor, self).__init__()
+        self.collection = collection
+        self._factory = dataset_factory
+        self._dataset = self._factory()
+        self._limit = None
+        self._skip = None
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self._skip:
+            for _ in range(self._skip):
+                next(self._dataset)
+            self._skip = None
+        if self._limit is not None and self._limit <= 0:
+            raise StopIteration()
+        if self._limit is not None:
+            self._limit -= 1
+        return next(self._dataset)
+
+    next = __next__
+
+    def __getitem__(self, index):
+        arr = [x for x in self._dataset]
+        self._dataset = iter(arr)
+        return arr[index]
+
+
+class MockCollection(object):
+
+    def __init__(self, db, name):
+        super(MockCollection, self).__init__()
+        self.name = name
+        self._collection_database = db
+        self._documents = {}
+        self.write_concern = {}
+
+    def __getattr__(self, name):
+        if name == 'database':
+            return self._collection_database
+
+    def ensure_index(self, key_or_list, *args, **kwargs):
+        pass
+
+    def index_information(self):
+        return {}
+
+    def find_one(self, spec_or_id=None, *args, **kwargs):
+        if spec_or_id is None:
+            spec_or_id = {}
+        if not isinstance(spec_or_id, collections.Mapping):
+            spec_or_id = {'_id': spec_or_id}
+
+        try:
+            return next(self.find(spec_or_id, *args, **kwargs))
+        except StopIteration:
+            return None
+
+    def find(self, spec=None, *args, **kwargs):
+        return MockCursor(self, functools.partial(self._get_dataset, spec))
+
+    def _get_dataset(self, spec):
+        dataset = (self._copy_doc(document, dict) for document in
+                   self._iter_documents(spec))
+        return dataset
+
+    def _iter_documents(self, spec=None):
+        return (SON_MANIPULATOR.transform_outgoing(document, self) for
+                document in six.itervalues(self._documents)
+                if self._apply_filter(document, spec))
+
+    def _apply_filter(self, document, query):
+        # All terms in the query must match (an empty query matches anything).
+        is_match = True
+        for key, search in six.iteritems(query):
+            doc_val = document.get(key)
+            if isinstance(search, dict):
+                op_dict = {'$in': lambda dv, sv: dv in sv}
+                is_match = all(
+                    op_str in op_dict and op_dict[op_str](doc_val, search_val)
+                    for op_str, search_val in six.iteritems(search)
+                )
+            else:
+                is_match = doc_val == search
+            if not is_match:
+                break
+
+        return is_match
+
+    def _copy_doc(self, obj, container):
+        if isinstance(obj, list):
+            new = []
+            for item in obj:
+                new.append(self._copy_doc(item, container))
+            return new
+        if isinstance(obj, dict):
+            new = container()
+            for key, value in obj.items():
+                new[key] = self._copy_doc(value, container)
+            return new
+        else:
+            return copy.copy(obj)
+
+    def insert(self, data, manipulate=True, **kwargs):
+        if isinstance(data, list):
+            return [self._insert(element) for element in data]
+        return self._insert(data)
+
+    def save(self, data, manipulate=True, **kwargs):
+        return self._insert(data)
+
+    def _insert(self, data):
+        if '_id' not in data:
+            data['_id'] = uuid.uuid4().hex
+        object_id = data['_id']
+        self._documents[object_id] = self._internalize_dict(data)
+        return object_id
+
+    def find_and_modify(self, spec, document, upsert=False, **kwargs):
+        self.update(spec, document, upsert, **kwargs)
+
+    def update(self, spec, document, upsert=False, **kwargs):
+
+        existing_docs = [doc for doc in six.itervalues(self._documents)
+                         if self._apply_filter(doc, spec)]
+        if existing_docs:
+            existing_doc = existing_docs[0]  # should find only 1 match
+            _id = existing_doc['_id']
+            existing_doc.clear()
+            existing_doc['_id'] = _id
+            existing_doc.update(self._internalize_dict(document))
+        elif upsert:
+            existing_doc = self._documents[self._insert(document)]
+
+    def _internalize_dict(self, d):
+        return {k: copy.deepcopy(v) for k, v in six.iteritems(d)}
+
+    def remove(self, spec_or_id=None, search_filter=None):
+        """Remove objects matching spec_or_id from the collection."""
+        if spec_or_id is None:
+            spec_or_id = search_filter if search_filter else {}
+        if not isinstance(spec_or_id, dict):
+            spec_or_id = {'_id': spec_or_id}
+        to_delete = list(self.find(spec=spec_or_id))
+        for doc in to_delete:
+            doc_id = doc['_id']
+            del self._documents[doc_id]
+
+        return {
+            "connectionId": uuid.uuid4().hex,
+            "n": len(to_delete),
+            "ok": 1.0,
+            "err": None,
+        }
+
+
+class MockMongoDB(object):
+    def __init__(self, dbname):
+        self._dbname = dbname
+        self.manipulator = None
+
+    def authenticate(self, username, password):
+        pass
+
+    def add_son_manipulator(self, manipulator):
+        global SON_MANIPULATOR
+        SON_MANIPULATOR = manipulator
+
+    def __getattr__(self, name):
+        if name == 'authenticate':
+            return self.authenticate
+        elif name == 'name':
+            return self._dbname
+        elif name == 'add_son_manipulator':
+            return self.add_son_manipulator
+        else:
+            return get_collection(self._dbname, name)
+
+    def __getitem__(self, name):
+        return get_collection(self._dbname, name)
+
+
+class MockMongoClient(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __getattr__(self, dbname):
+        return MockMongoDB(dbname)
+
+
+def get_collection(db_name, collection_name):
+    mongo_collection = MockCollection(MockMongoDB(db_name), collection_name)
+    return mongo_collection
+
+
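+# Swap pymongo's client classes for the mocks above so the dogpile mongo
+# backend can be exercised without a running MongoDB server.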
+def pymongo_override():
+    global pymongo
+    import pymongo
+    if pymongo.MongoClient is not MockMongoClient:
+        pymongo.MongoClient = MockMongoClient
+    if pymongo.MongoReplicaSetClient is not MockMongoClient:
+        pymongo.MongoReplicaSetClient = MockMongoClient
+
+
+class MyTransformer(mongo.BaseTransform):
+    """Added here just to check manipulator logic is used correctly."""
+
+    def transform_incoming(self, son, collection):
+        return super(MyTransformer, self).transform_incoming(son, collection)
+
+    def transform_outgoing(self, son, collection):
+        return super(MyTransformer, self).transform_outgoing(son, collection)
+
+
+class MongoCache(tests.BaseTestCase):
+    def setUp(self):
+        super(MongoCache, self).setUp()
+        global COLLECTIONS
+        COLLECTIONS = {}
+        mongo.MongoApi._DB = {}
+        mongo.MongoApi._MONGO_COLLS = {}
+        pymongo_override()
+        # using typical configuration
+        self.arguments = {
+            'db_hosts': 'localhost:27017',
+            'db_name': 'ks_cache',
+            'cache_collection': 'cache',
+            'username': 'test_user',
+            'password': 'test_password'
+        }
+
+    def test_missing_db_hosts(self):
+        self.arguments.pop('db_hosts')
+        region = dp_region.make_region()
+        self.assertRaises(exception.ValidationError, region.configure,
+                          'keystone.cache.mongo',
+                          arguments=self.arguments)
+
+    def test_missing_db_name(self):
+        self.arguments.pop('db_name')
+        region = dp_region.make_region()
+        self.assertRaises(exception.ValidationError, region.configure,
+                          'keystone.cache.mongo',
+                          arguments=self.arguments)
+
+    def test_missing_cache_collection_name(self):
+        self.arguments.pop('cache_collection')
+        region = dp_region.make_region()
+        self.assertRaises(exception.ValidationError, region.configure,
+                          'keystone.cache.mongo',
+                          arguments=self.arguments)
+
+    def test_incorrect_write_concern(self):
+        self.arguments['w'] = 'one value'
+        region = dp_region.make_region()
+        self.assertRaises(exception.ValidationError, region.configure,
+                          'keystone.cache.mongo',
+                          arguments=self.arguments)
+
+    def test_correct_write_concern(self):
+        self.arguments['w'] = 1
+        region = dp_region.make_region().configure('keystone.cache.mongo',
+                                                   arguments=self.arguments)
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue10")
+        # There is no proxy, so we can access the MongoCacheBackend directly.
+        self.assertEqual(1, region.backend.api.w)
+
+    def test_incorrect_read_preference(self):
+        self.arguments['read_preference'] = 'inValidValue'
+        region = dp_region.make_region().configure('keystone.cache.mongo',
+                                                   arguments=self.arguments)
+        # Because pymongo is loaded lazily, the read_preference value should
+        # still be a string and NOT an enum.
+        self.assertEqual('inValidValue', region.backend.api.read_preference)
+
+        random_key = uuid.uuid4().hex
+        self.assertRaises(ValueError, region.set,
+                          random_key, "dummyValue10")
+
+    def test_correct_read_preference(self):
+        self.arguments['read_preference'] = 'secondaryPreferred'
+        region = dp_region.make_region().configure('keystone.cache.mongo',
+                                                   arguments=self.arguments)
+        # Because pymongo is loaded lazily, the read_preference value should
+        # still be a string and NOT an enum.
+        self.assertEqual('secondaryPreferred',
+                         region.backend.api.read_preference)
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue10")
+
+        # Now that pymongo has been loaded, the expected read_preference
+        # value is the enum.
+        # There is no proxy, so we can access MongoCacheBackend directly.
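+        # (In pymongo 2.x, ReadPreference.SECONDARY_PREFERRED is the integer
+        # constant 3, which is what the assertion below expects.)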
+        self.assertEqual(3, region.backend.api.read_preference)
+
+    def test_missing_replica_set_name(self):
+        self.arguments['use_replica'] = True
+        region = dp_region.make_region()
+        self.assertRaises(exception.ValidationError, region.configure,
+                          'keystone.cache.mongo',
+                          arguments=self.arguments)
+
+    def test_provided_replica_set_name(self):
+        self.arguments['use_replica'] = True
+        self.arguments['replicaset_name'] = 'my_replica'
+        dp_region.make_region().configure('keystone.cache.mongo',
+                                          arguments=self.arguments)
+        self.assertTrue(True)  # reaching here means no initialization error
+
+    def test_incorrect_mongo_ttl_seconds(self):
+        self.arguments['mongo_ttl_seconds'] = 'sixty'
+        region = dp_region.make_region()
+        self.assertRaises(exception.ValidationError, region.configure,
+                          'keystone.cache.mongo',
+                          arguments=self.arguments)
+
+    def test_cache_configuration_values_assertion(self):
+        self.arguments['use_replica'] = True
+        self.arguments['replicaset_name'] = 'my_replica'
+        self.arguments['mongo_ttl_seconds'] = 60
+        self.arguments['ssl'] = False
+        region = dp_region.make_region().configure('keystone.cache.mongo',
+                                                   arguments=self.arguments)
+        # There is no proxy, so we can access MongoCacheBackend directly.
+        self.assertEqual('localhost:27017', region.backend.api.hosts)
+        self.assertEqual('ks_cache', region.backend.api.db_name)
+        self.assertEqual('cache', region.backend.api.cache_collection)
+        self.assertEqual('test_user', region.backend.api.username)
+        self.assertEqual('test_password', region.backend.api.password)
+        self.assertEqual(True, region.backend.api.use_replica)
+        self.assertEqual('my_replica', region.backend.api.replicaset_name)
+        self.assertEqual(False, region.backend.api.conn_kwargs['ssl'])
+        self.assertEqual(60, region.backend.api.ttl_seconds)
+
+    def test_multiple_region_cache_configuration(self):
+        arguments1 = copy.copy(self.arguments)
+        arguments1['cache_collection'] = 'cache_region1'
+
+        region1 = dp_region.make_region().configure('keystone.cache.mongo',
+                                                    arguments=arguments1)
+        # There is no proxy, so we can access MongoCacheBackend directly.
+        self.assertEqual('localhost:27017', region1.backend.api.hosts)
+        self.assertEqual('ks_cache', region1.backend.api.db_name)
+        self.assertEqual('cache_region1', region1.backend.api.cache_collection)
+        self.assertEqual('test_user', region1.backend.api.username)
+        self.assertEqual('test_password', region1.backend.api.password)
+        # Should be None because of delayed initialization
+        self.assertIsNone(region1.backend.api._data_manipulator)
+
+        random_key1 = uuid.uuid4().hex
+        region1.set(random_key1, "dummyValue10")
+        self.assertEqual("dummyValue10", region1.get(random_key1))
+        # The data manipulator should now be initialized.
+        self.assertIsInstance(region1.backend.api._data_manipulator,
+                              mongo.BaseTransform)
+
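+        # Build the fully qualified dotted path ('module.Class') that the
+        # backend is expected to import lazily to construct the manipulator.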
+        class_name = '%s.%s' % (MyTransformer.__module__, "MyTransformer")
+
+        arguments2 = copy.copy(self.arguments)
+        arguments2['cache_collection'] = 'cache_region2'
+        arguments2['son_manipulator'] = class_name
+
+        region2 = dp_region.make_region().configure('keystone.cache.mongo',
+                                                    arguments=arguments2)
+        # There is no proxy, so we can access MongoCacheBackend directly.
+        self.assertEqual('localhost:27017', region2.backend.api.hosts)
+        self.assertEqual('ks_cache', region2.backend.api.db_name)
+        self.assertEqual('cache_region2', region2.backend.api.cache_collection)
+
+        # Should be None because of delayed initialization
+        self.assertIsNone(region2.backend.api._data_manipulator)
+
+        random_key = uuid.uuid4().hex
+        region2.set(random_key, "dummyValue20")
+        self.assertEqual("dummyValue20", region2.get(random_key))
+        # The data manipulator should now be initialized.
+        self.assertIsInstance(region2.backend.api._data_manipulator,
+                              MyTransformer)
+
+        region1.set(random_key1, "dummyValue22")
+        self.assertEqual("dummyValue22", region1.get(random_key1))
+
+    def test_typical_configuration(self):
+
+        dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+        self.assertTrue(True)  # reaching here means no initialization error
+
+    def test_backend_get_missing_data(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+
+        random_key = uuid.uuid4().hex
+        # should return NO_VALUE as key does not exist in cache
+        self.assertEqual(api.NO_VALUE, region.get(random_key))
+
+    def test_backend_set_data(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue")
+        self.assertEqual("dummyValue", region.get(random_key))
+
+    def test_backend_set_data_with_string_as_valid_ttl(self):
+
+        self.arguments['mongo_ttl_seconds'] = '3600'
+        region = dp_region.make_region().configure('keystone.cache.mongo',
+                                                   arguments=self.arguments)
+        self.assertEqual(3600, region.backend.api.ttl_seconds)
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue")
+        self.assertEqual("dummyValue", region.get(random_key))
+
+    def test_backend_set_data_with_int_as_valid_ttl(self):
+
+        self.arguments['mongo_ttl_seconds'] = 1800
+        region = dp_region.make_region().configure('keystone.cache.mongo',
+                                                   arguments=self.arguments)
+        self.assertEqual(1800, region.backend.api.ttl_seconds)
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue")
+        self.assertEqual("dummyValue", region.get(random_key))
+
+    def test_backend_set_none_as_data(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, None)
+        self.assertIsNone(region.get(random_key))
+
+    def test_backend_set_blank_as_data(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "")
+        self.assertEqual("", region.get(random_key))
+
+    def test_backend_set_same_key_multiple_times(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue")
+        self.assertEqual("dummyValue", region.get(random_key))
+
+        dict_value = {'key1': 'value1'}
+        region.set(random_key, dict_value)
+        self.assertEqual(dict_value, region.get(random_key))
+
+        region.set(random_key, "dummyValue2")
+        self.assertEqual("dummyValue2", region.get(random_key))
+
+    def test_backend_multi_set_data(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+        random_key = uuid.uuid4().hex
+        random_key1 = uuid.uuid4().hex
+        random_key2 = uuid.uuid4().hex
+        random_key3 = uuid.uuid4().hex
+        mapping = {random_key1: 'dummyValue1',
+                   random_key2: 'dummyValue2',
+                   random_key3: 'dummyValue3'}
+        region.set_multi(mapping)
+        # should return NO_VALUE as key does not exist in cache
+        self.assertEqual(api.NO_VALUE, region.get(random_key))
+        self.assertFalse(region.get(random_key))
+        self.assertEqual("dummyValue1", region.get(random_key1))
+        self.assertEqual("dummyValue2", region.get(random_key2))
+        self.assertEqual("dummyValue3", region.get(random_key3))
+
+    def test_backend_multi_get_data(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+        random_key = uuid.uuid4().hex
+        random_key1 = uuid.uuid4().hex
+        random_key2 = uuid.uuid4().hex
+        random_key3 = uuid.uuid4().hex
+        mapping = {random_key1: 'dummyValue1',
+                   random_key2: '',
+                   random_key3: 'dummyValue3'}
+        region.set_multi(mapping)
+
+        keys = [random_key, random_key1, random_key2, random_key3]
+        results = region.get_multi(keys)
+        # should return NO_VALUE as key does not exist in cache
+        self.assertEqual(api.NO_VALUE, results[0])
+        self.assertEqual("dummyValue1", results[1])
+        self.assertEqual("", results[2])
+        self.assertEqual("dummyValue3", results[3])
+
+    def test_backend_multi_set_should_update_existing(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+        random_key = uuid.uuid4().hex
+        random_key1 = uuid.uuid4().hex
+        random_key2 = uuid.uuid4().hex
+        random_key3 = uuid.uuid4().hex
+        mapping = {random_key1: 'dummyValue1',
+                   random_key2: 'dummyValue2',
+                   random_key3: 'dummyValue3'}
+        region.set_multi(mapping)
+        # should return NO_VALUE as key does not exist in cache
+        self.assertEqual(api.NO_VALUE, region.get(random_key))
+        self.assertEqual("dummyValue1", region.get(random_key1))
+        self.assertEqual("dummyValue2", region.get(random_key2))
+        self.assertEqual("dummyValue3", region.get(random_key3))
+
+        mapping = {random_key1: 'dummyValue4',
+                   random_key2: 'dummyValue5'}
+        region.set_multi(mapping)
+        self.assertEqual(api.NO_VALUE, region.get(random_key))
+        self.assertEqual("dummyValue4", region.get(random_key1))
+        self.assertEqual("dummyValue5", region.get(random_key2))
+        self.assertEqual("dummyValue3", region.get(random_key3))
+
+    def test_backend_multi_set_get_with_blanks_none(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+        random_key = uuid.uuid4().hex
+        random_key1 = uuid.uuid4().hex
+        random_key2 = uuid.uuid4().hex
+        random_key3 = uuid.uuid4().hex
+        random_key4 = uuid.uuid4().hex
+        mapping = {random_key1: 'dummyValue1',
+                   random_key2: None,
+                   random_key3: '',
+                   random_key4: 'dummyValue4'}
+        region.set_multi(mapping)
+        # should return NO_VALUE as key does not exist in cache
+        self.assertEqual(api.NO_VALUE, region.get(random_key))
+        self.assertEqual("dummyValue1", region.get(random_key1))
+        self.assertIsNone(region.get(random_key2))
+        self.assertEqual("", region.get(random_key3))
+        self.assertEqual("dummyValue4", region.get(random_key4))
+
+        keys = [random_key, random_key1, random_key2, random_key3, random_key4]
+        results = region.get_multi(keys)
+
+        # should return NO_VALUE as key does not exist in cache
+        self.assertEqual(api.NO_VALUE, results[0])
+        self.assertEqual("dummyValue1", results[1])
+        self.assertIsNone(results[2])
+        self.assertEqual("", results[3])
+        self.assertEqual("dummyValue4", results[4])
+
+        mapping = {random_key1: 'dummyValue5',
+                   random_key2: 'dummyValue6'}
+        region.set_multi(mapping)
+        self.assertEqual(api.NO_VALUE, region.get(random_key))
+        self.assertEqual("dummyValue5", region.get(random_key1))
+        self.assertEqual("dummyValue6", region.get(random_key2))
+        self.assertEqual("", region.get(random_key3))
+
+    def test_backend_delete_data(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue")
+        self.assertEqual("dummyValue", region.get(random_key))
+
+        region.delete(random_key)
+        # should return NO_VALUE as key no longer exists in cache
+        self.assertEqual(api.NO_VALUE, region.get(random_key))
+
+    def test_backend_multi_delete_data(self):
+
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+        random_key = uuid.uuid4().hex
+        random_key1 = uuid.uuid4().hex
+        random_key2 = uuid.uuid4().hex
+        random_key3 = uuid.uuid4().hex
+        mapping = {random_key1: 'dummyValue1',
+                   random_key2: 'dummyValue2',
+                   random_key3: 'dummyValue3'}
+        region.set_multi(mapping)
+        # should return NO_VALUE as key does not exist in cache
+        self.assertEqual(api.NO_VALUE, region.get(random_key))
+        self.assertEqual("dummyValue1", region.get(random_key1))
+        self.assertEqual("dummyValue2", region.get(random_key2))
+        self.assertEqual("dummyValue3", region.get(random_key3))
+        self.assertEqual(api.NO_VALUE, region.get("InvalidKey"))
+
+        keys = mapping.keys()
+
+        region.delete_multi(keys)
+
+        self.assertEqual(api.NO_VALUE, region.get("InvalidKey"))
+        # should return NO_VALUE as keys no longer exist in cache
+        self.assertEqual(api.NO_VALUE, region.get(random_key1))
+        self.assertEqual(api.NO_VALUE, region.get(random_key2))
+        self.assertEqual(api.NO_VALUE, region.get(random_key3))
+
+    def test_additional_crud_method_arguments_support(self):
+        """Additional arguments should works across find/insert/update."""
+
+        self.arguments['wtimeout'] = 30000
+        self.arguments['j'] = True
+        self.arguments['continue_on_error'] = True
+        self.arguments['secondary_acceptable_latency_ms'] = 60
+        region = dp_region.make_region().configure(
+            'keystone.cache.mongo',
+            arguments=self.arguments
+        )
+
+        # There is no proxy, so we can access MongoCacheBackend directly.
+        api_methargs = region.backend.api.meth_kwargs
+        self.assertEqual(30000, api_methargs['wtimeout'])
+        self.assertEqual(True, api_methargs['j'])
+        self.assertEqual(True, api_methargs['continue_on_error'])
+        self.assertEqual(60, api_methargs['secondary_acceptable_latency_ms'])
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue1")
+        self.assertEqual("dummyValue1", region.get(random_key))
+
+        region.set(random_key, "dummyValue2")
+        self.assertEqual("dummyValue2", region.get(random_key))
+
+        random_key = uuid.uuid4().hex
+        region.set(random_key, "dummyValue3")
+        self.assertEqual("dummyValue3", region.get(random_key))
diff --git a/keystone-moon/keystone/tests/unit/test_catalog.py b/keystone-moon/keystone/tests/unit/test_catalog.py
new file mode 100644 (file)
index 0000000..9dda5d8
--- /dev/null
@@ -0,0 +1,219 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import uuid
+
+import six
+
+from keystone import catalog
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit import rest
+
+
+BASE_URL = 'http://127.0.0.1:35357/v2'
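+# Unique sentinel: lets _endpoint_create() distinguish "use the service
+# fixture created in setUp" from an explicitly passed service_id of None/''.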
+SERVICE_FIXTURE = object()
+
+
+class V2CatalogTestCase(rest.RestfulTestCase):
+    def setUp(self):
+        super(V2CatalogTestCase, self).setUp()
+        self.useFixture(database.Database())
+
+        self.service_id = uuid.uuid4().hex
+        self.service = self.new_service_ref()
+        self.service['id'] = self.service_id
+        self.catalog_api.create_service(
+            self.service_id,
+            self.service.copy())
+
+        # TODO(termie): add an admin user to the fixtures and use that user;
+        # override the fixtures for now
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'],
+            self.tenant_bar['id'],
+            self.role_admin['id'])
+
+    def config_overrides(self):
+        super(V2CatalogTestCase, self).config_overrides()
+        self.config_fixture.config(
+            group='catalog',
+            driver='keystone.catalog.backends.sql.Catalog')
+
+    def new_ref(self):
+        """Populates a ref with attributes common to all API entities."""
+        return {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'enabled': True}
+
+    def new_service_ref(self):
+        ref = self.new_ref()
+        ref['type'] = uuid.uuid4().hex
+        return ref
+
+    def _get_token_id(self, r):
+        """Applicable only to JSON."""
+        return r.result['access']['token']['id']
+
+    def _endpoint_create(self, expected_status=200, service_id=SERVICE_FIXTURE,
+                         publicurl='http://localhost:8080',
+                         internalurl='http://localhost:8080',
+                         adminurl='http://localhost:8080'):
+        if service_id is SERVICE_FIXTURE:
+            service_id = self.service_id
+        # FIXME(dolph): expected status should actually be 201 Created
+        path = '/v2.0/endpoints'
+        body = {
+            'endpoint': {
+                'adminurl': adminurl,
+                'service_id': service_id,
+                'region': 'RegionOne',
+                'internalurl': internalurl,
+                'publicurl': publicurl
+            }
+        }
+
+        r = self.admin_request(method='POST', token=self.get_scoped_token(),
+                               path=path, expected_status=expected_status,
+                               body=body)
+        return body, r
+
+    def test_endpoint_create(self):
+        req_body, response = self._endpoint_create()
+        self.assertIn('endpoint', response.result)
+        self.assertIn('id', response.result['endpoint'])
+        for field, value in six.iteritems(req_body['endpoint']):
+            self.assertEqual(response.result['endpoint'][field], value)
+
+    def test_endpoint_create_with_null_adminurl(self):
+        req_body, response = self._endpoint_create(adminurl=None)
+        self.assertIsNone(req_body['endpoint']['adminurl'])
+        self.assertNotIn('adminurl', response.result['endpoint'])
+
+    def test_endpoint_create_with_empty_adminurl(self):
+        req_body, response = self._endpoint_create(adminurl='')
+        self.assertEqual('', req_body['endpoint']['adminurl'])
+        self.assertNotIn("adminurl", response.result['endpoint'])
+
+    def test_endpoint_create_with_null_internalurl(self):
+        req_body, response = self._endpoint_create(internalurl=None)
+        self.assertIsNone(req_body['endpoint']['internalurl'])
+        self.assertNotIn('internalurl', response.result['endpoint'])
+
+    def test_endpoint_create_with_empty_internalurl(self):
+        req_body, response = self._endpoint_create(internalurl='')
+        self.assertEqual('', req_body['endpoint']['internalurl'])
+        self.assertNotIn("internalurl", response.result['endpoint'])
+
+    def test_endpoint_create_with_null_publicurl(self):
+        self._endpoint_create(expected_status=400, publicurl=None)
+
+    def test_endpoint_create_with_empty_publicurl(self):
+        self._endpoint_create(expected_status=400, publicurl='')
+
+    def test_endpoint_create_with_null_service_id(self):
+        self._endpoint_create(expected_status=400, service_id=None)
+
+    def test_endpoint_create_with_empty_service_id(self):
+        self._endpoint_create(expected_status=400, service_id='')
+
+
+class TestV2CatalogAPISQL(tests.TestCase):
+
+    def setUp(self):
+        super(TestV2CatalogAPISQL, self).setUp()
+        self.useFixture(database.Database())
+        self.catalog_api = catalog.Manager()
+
+        self.service_id = uuid.uuid4().hex
+        service = {'id': self.service_id, 'name': uuid.uuid4().hex}
+        self.catalog_api.create_service(self.service_id, service)
+
+        endpoint = self.new_endpoint_ref(service_id=self.service_id)
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+    def config_overrides(self):
+        super(TestV2CatalogAPISQL, self).config_overrides()
+        self.config_fixture.config(
+            group='catalog',
+            driver='keystone.catalog.backends.sql.Catalog')
+
+    def new_endpoint_ref(self, service_id):
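+        # The 'interface' value is sliced to 8 characters, presumably to fit
+        # the 8-character interface column in the SQL endpoint schema.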
+        return {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'interface': uuid.uuid4().hex[:8],
+            'service_id': service_id,
+            'url': uuid.uuid4().hex,
+            'region': uuid.uuid4().hex,
+        }
+
+    def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
+        user_id = uuid.uuid4().hex
+        tenant_id = uuid.uuid4().hex
+
+        # the only endpoint in the catalog is the one created in setUp
+        catalog = self.catalog_api.get_catalog(user_id, tenant_id)
+        self.assertEqual(1, len(catalog))
+        # it's also the only endpoint in the backend
+        self.assertEqual(1, len(self.catalog_api.list_endpoints()))
+
+        # create a new, invalid endpoint - the %(tenant_id) substitution is
+        # missing its trailing 's' conversion type
+        endpoint = self.new_endpoint_ref(self.service_id)
+        endpoint['url'] = 'http://keystone/%(tenant_id)'
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        # create a new, invalid endpoint - nonexistent key
+        endpoint = self.new_endpoint_ref(self.service_id)
+        endpoint['url'] = 'http://keystone/%(you_wont_find_me)s'
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        # verify that the invalid endpoints don't appear in the catalog
+        catalog = self.catalog_api.get_catalog(user_id, tenant_id)
+        self.assertEqual(1, len(catalog))
+        # all three endpoints appear in the backend
+        self.assertEqual(3, len(self.catalog_api.list_endpoints()))
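+
+        # A minimal sketch of why those two URLs are rejected, assuming the
+        # catalog formats endpoint URLs with Python %-substitution against
+        # the request context:
+        #
+        #   'http://keystone/%(tenant_id)s' % {'tenant_id': 't'}  # ok
+        #   'http://keystone/%(tenant_id)' % {'tenant_id': 't'}   # ValueError
+        #   'http://keystone/%(you_wont_find_me)s' % {}           # KeyError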
+
+    def test_get_catalog_always_returns_service_name(self):
+        user_id = uuid.uuid4().hex
+        tenant_id = uuid.uuid4().hex
+
+        # create a service, with a name
+        named_svc = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(named_svc['id'], named_svc)
+        endpoint = self.new_endpoint_ref(service_id=named_svc['id'])
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        # create a service, with no name
+        unnamed_svc = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex
+        }
+        self.catalog_api.create_service(unnamed_svc['id'], unnamed_svc)
+        endpoint = self.new_endpoint_ref(service_id=unnamed_svc['id'])
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        region = None
+        catalog = self.catalog_api.get_catalog(user_id, tenant_id)
+
+        self.assertEqual(named_svc['name'],
+                         catalog[region][named_svc['type']]['name'])
+        self.assertEqual('', catalog[region][unnamed_svc['type']]['name'])
diff --git a/keystone-moon/keystone/tests/unit/test_cert_setup.py b/keystone-moon/keystone/tests/unit/test_cert_setup.py
new file mode 100644 (file)
index 0000000..d1e9ccf
--- /dev/null
@@ -0,0 +1,246 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+
+import mock
+from testtools import matchers
+
+from keystone.common import environment
+from keystone.common import openssl
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import rest
+from keystone import token
+
+
+SSLDIR = tests.dirs.tmp('ssl')
+CONF = tests.CONF
+DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
+
+
+CERTDIR = os.path.join(SSLDIR, 'certs')
+KEYDIR = os.path.join(SSLDIR, 'private')
+
+
+class CertSetupTestCase(rest.RestfulTestCase):
+
+    def setUp(self):
+        super(CertSetupTestCase, self).setUp()
+
+        def cleanup_ssldir():
+            try:
+                shutil.rmtree(SSLDIR)
+            except OSError:
+                pass
+
+        self.addCleanup(cleanup_ssldir)
+
+    def config_overrides(self):
+        super(CertSetupTestCase, self).config_overrides()
+        ca_certs = os.path.join(CERTDIR, 'ca.pem')
+        ca_key = os.path.join(CERTDIR, 'cakey.pem')
+
+        self.config_fixture.config(
+            group='signing',
+            certfile=os.path.join(CERTDIR, 'signing_cert.pem'),
+            ca_certs=ca_certs,
+            ca_key=ca_key,
+            keyfile=os.path.join(KEYDIR, 'signing_key.pem'))
+        self.config_fixture.config(
+            group='ssl',
+            ca_key=ca_key)
+        self.config_fixture.config(
+            group='eventlet_server_ssl',
+            ca_certs=ca_certs,
+            certfile=os.path.join(CERTDIR, 'keystone.pem'),
+            keyfile=os.path.join(KEYDIR, 'keystonekey.pem'))
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pkiz.Provider')
+
+    def test_can_handle_missing_certs(self):
+        controller = token.controllers.Auth()
+
+        self.config_fixture.config(group='signing', certfile='invalid')
+        password = 'fake1'
+        user = {
+            'name': 'fake1',
+            'password': password,
+            'domain_id': DEFAULT_DOMAIN_ID
+        }
+        user = self.identity_api.create_user(user)
+        body_dict = {
+            'passwordCredentials': {
+                'userId': user['id'],
+                'password': password,
+            },
+        }
+        self.assertRaises(exception.UnexpectedError,
+                          controller.authenticate,
+                          {}, body_dict)
+
+    def test_create_pki_certs(self, rebuild=False):
+        pki = openssl.ConfigurePKI(None, None, rebuild=rebuild)
+        pki.run()
+        self.assertTrue(os.path.exists(CONF.signing.certfile))
+        self.assertTrue(os.path.exists(CONF.signing.ca_certs))
+        self.assertTrue(os.path.exists(CONF.signing.keyfile))
+
+    def test_create_ssl_certs(self, rebuild=False):
+        ssl = openssl.ConfigureSSL(None, None, rebuild=rebuild)
+        ssl.run()
+        self.assertTrue(os.path.exists(CONF.eventlet_server_ssl.ca_certs))
+        self.assertTrue(os.path.exists(CONF.eventlet_server_ssl.certfile))
+        self.assertTrue(os.path.exists(CONF.eventlet_server_ssl.keyfile))
+
+    def test_fetch_signing_cert(self, rebuild=False):
+        pki = openssl.ConfigurePKI(None, None, rebuild=rebuild)
+        pki.run()
+
+        # NOTE(jamielennox): Use request directly because certificate
+        # requests don't have some of the normal information
+        signing_resp = self.request(self.public_app,
+                                    '/v2.0/certificates/signing',
+                                    method='GET', expected_status=200)
+
+        cacert_resp = self.request(self.public_app,
+                                   '/v2.0/certificates/ca',
+                                   method='GET', expected_status=200)
+
+        with open(CONF.signing.certfile) as f:
+            self.assertEqual(f.read(), signing_resp.text)
+
+        with open(CONF.signing.ca_certs) as f:
+            self.assertEqual(f.read(), cacert_resp.text)
+
+        # NOTE(jamielennox): This is weird behaviour that we need to enforce.
+        # It doesn't matter what you ask for; it's always going to give text
+        # with a text/html content_type.
+
+        for path in ['/v2.0/certificates/signing', '/v2.0/certificates/ca']:
+            for accept in [None, 'text/html', 'application/json', 'text/xml']:
+                headers = {'Accept': accept} if accept else {}
+                resp = self.request(self.public_app, path, method='GET',
+                                    expected_status=200,
+                                    headers=headers)
+
+                self.assertEqual('text/html', resp.content_type)
+
+    def test_fetch_signing_cert_when_rebuild(self):
+        pki = openssl.ConfigurePKI(None, None)
+        pki.run()
+        self.test_fetch_signing_cert(rebuild=True)
+
+    def test_failure(self):
+        for path in ['/v2.0/certificates/signing', '/v2.0/certificates/ca']:
+            self.request(self.public_app, path, method='GET',
+                         expected_status=500)
+
+    def test_pki_certs_rebuild(self):
+        self.test_create_pki_certs()
+        with open(CONF.signing.certfile) as f:
+            cert_file1 = f.read()
+
+        self.test_create_pki_certs(rebuild=True)
+        with open(CONF.signing.certfile) as f:
+            cert_file2 = f.read()
+
+        self.assertNotEqual(cert_file1, cert_file2)
+
+    def test_ssl_certs_rebuild(self):
+        self.test_create_ssl_certs()
+        with open(CONF.eventlet_server_ssl.certfile) as f:
+            cert_file1 = f.read()
+
+        self.test_create_ssl_certs(rebuild=True)
+        with open(CONF.eventlet_server_ssl.certfile) as f:
+            cert_file2 = f.read()
+
+        self.assertNotEqual(cert_file1, cert_file2)
+
+    @mock.patch.object(os, 'remove')
+    def test_rebuild_pki_certs_remove_error(self, mock_remove):
+        self.test_create_pki_certs()
+        with open(CONF.signing.certfile) as f:
+            cert_file1 = f.read()
+
+        mock_remove.side_effect = OSError()
+        self.test_create_pki_certs(rebuild=True)
+        with open(CONF.signing.certfile) as f:
+            cert_file2 = f.read()
+
+        self.assertEqual(cert_file1, cert_file2)
+
+    @mock.patch.object(os, 'remove')
+    def test_rebuild_ssl_certs_remove_error(self, mock_remove):
+        self.test_create_ssl_certs()
+        with open(CONF.eventlet_server_ssl.certfile) as f:
+            cert_file1 = f.read()
+
+        mock_remove.side_effect = OSError()
+        self.test_create_ssl_certs(rebuild=True)
+        with open(CONF.eventlet_server_ssl.certfile) as f:
+            cert_file2 = f.read()
+
+        self.assertEqual(cert_file1, cert_file2)
+
+    def test_create_pki_certs_twice_without_rebuild(self):
+        self.test_create_pki_certs()
+        with open(CONF.signing.certfile) as f:
+            cert_file1 = f.read()
+
+        self.test_create_pki_certs()
+        with open(CONF.signing.certfile) as f:
+            cert_file2 = f.read()
+
+        self.assertEqual(cert_file1, cert_file2)
+
+    def test_create_ssl_certs_twice_without_rebuild(self):
+        self.test_create_ssl_certs()
+        with open(CONF.eventlet_server_ssl.certfile) as f:
+            cert_file1 = f.read()
+
+        self.test_create_ssl_certs()
+        with open(CONF.eventlet_server_ssl.certfile) as f:
+            cert_file2 = f.read()
+
+        self.assertEqual(cert_file1, cert_file2)
+
+
+class TestExecCommand(tests.TestCase):
+
+    @mock.patch.object(environment.subprocess.Popen, 'poll')
+    def test_running_a_successful_command(self, mock_poll):
+        mock_poll.return_value = 0
+
+        ssl = openssl.ConfigureSSL('keystone_user', 'keystone_group')
+        ssl.exec_command(['ls'])
+
+    @mock.patch.object(environment.subprocess.Popen, 'communicate')
+    @mock.patch.object(environment.subprocess.Popen, 'poll')
+    def test_running_an_invalid_command(self, mock_poll, mock_communicate):
+        output = 'this is the output string'
+
+        mock_communicate.return_value = (output, '')
+        mock_poll.return_value = 1
+
+        cmd = ['ls']
+        ssl = openssl.ConfigureSSL('keystone_user', 'keystone_group')
+        e = self.assertRaises(environment.subprocess.CalledProcessError,
+                              ssl.exec_command,
+                              cmd)
+        self.assertThat(e.output, matchers.Equals(output))
diff --git a/keystone-moon/keystone/tests/unit/test_cli.py b/keystone-moon/keystone/tests/unit/test_cli.py
new file mode 100644 (file)
index 0000000..20aa03e
--- /dev/null
@@ -0,0 +1,252 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import uuid
+
+import mock
+from oslo_config import cfg
+
+from keystone import cli
+from keystone.common import dependency
+from keystone.i18n import _
+from keystone import resource
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import database
+
+CONF = cfg.CONF
+
+
+class CliTestCase(tests.SQLDriverOverrides, tests.TestCase):
+    def config_files(self):
+        config_files = super(CliTestCase, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+        return config_files
+
+    def test_token_flush(self):
+        self.useFixture(database.Database())
+        self.load_backends()
+        cli.TokenFlush.main()
+
+
+class CliDomainConfigAllTestCase(tests.SQLDriverOverrides, tests.TestCase):
+
+    def setUp(self):
+        self.useFixture(database.Database())
+        super(CliDomainConfigAllTestCase, self).setUp()
+        self.load_backends()
+        self.config_fixture.config(
+            group='identity',
+            domain_config_dir=tests.TESTCONF + '/domain_configs_multi_ldap')
+        self.domain_count = 3
+        self.setup_initial_domains()
+
+    def config_files(self):
+        self.config_fixture.register_cli_opt(cli.command_opt)
+        self.addCleanup(self.cleanup)
+        config_files = super(CliDomainConfigAllTestCase, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+        return config_files
+
+    def cleanup(self):
+        CONF.reset()
+        CONF.unregister_opt(cli.command_opt)
+
+    def cleanup_domains(self):
+        for domain in self.domains:
+            if domain == 'domain_default':
+                # Not allowed to delete the default domain, but should at least
+                # delete any domain-specific config for it.
+                self.domain_config_api.delete_config(
+                    CONF.identity.default_domain_id)
+                continue
+            this_domain = self.domains[domain]
+            this_domain['enabled'] = False
+            self.resource_api.update_domain(this_domain['id'], this_domain)
+            self.resource_api.delete_domain(this_domain['id'])
+        self.domains = {}
+
+    def config(self, config_files):
+        CONF(args=['domain_config_upload', '--all'], project='keystone',
+             default_config_files=config_files)
+
+    def setup_initial_domains(self):
+
+        def create_domain(domain):
+            return self.resource_api.create_domain(domain['id'], domain)
+
+        self.domains = {}
+        self.addCleanup(self.cleanup_domains)
+        for x in range(1, self.domain_count):
+            domain = 'domain%s' % x
+            self.domains[domain] = create_domain(
+                {'id': uuid.uuid4().hex, 'name': domain})
+        self.domains['domain_default'] = create_domain(
+            resource.calc_default_domain())
+
+    def test_config_upload(self):
+        # The values below are the same as in the domain_configs_multi_ldap
+        # directory of test config_files.
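+        # Each file there is named keystone.<domain_name>.conf; that naming
+        # convention is how domain_config_upload maps a config file to its
+        # domain (see the file_name checks in the tests further below).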
+        default_config = {
+            'ldap': {'url': 'fake://memory',
+                     'user': 'cn=Admin',
+                     'password': 'password',
+                     'suffix': 'cn=example,cn=com'},
+            'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+        }
+        domain1_config = {
+            'ldap': {'url': 'fake://memory1',
+                     'user': 'cn=Admin',
+                     'password': 'password',
+                     'suffix': 'cn=example,cn=com'},
+            'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+        }
+        domain2_config = {
+            'ldap': {'url': 'fake://memory',
+                     'user': 'cn=Admin',
+                     'password': 'password',
+                     'suffix': 'cn=myroot,cn=com',
+                     'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org',
+                     'user_tree_dn': 'ou=Users,dc=myroot,dc=org'},
+            'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+        }
+
+        # Clear backend dependencies, since the CLI loads these manually.
+        dependency.reset()
+        cli.DomainConfigUpload.main()
+
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            CONF.identity.default_domain_id)
+        self.assertEqual(default_config, res)
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domains['domain1']['id'])
+        self.assertEqual(domain1_config, res)
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domains['domain2']['id'])
+        self.assertEqual(domain2_config, res)
+
+
+class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase):
+
+    def config(self, config_files):
+        CONF(args=['domain_config_upload', '--domain-name', 'Default'],
+             project='keystone', default_config_files=config_files)
+
+    def test_config_upload(self):
+        # The values below are the same as in the domain_configs_multi_ldap
+        # directory of test config_files.
+        default_config = {
+            'ldap': {'url': 'fake://memory',
+                     'user': 'cn=Admin',
+                     'password': 'password',
+                     'suffix': 'cn=example,cn=com'},
+            'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+        }
+
+        # Clear backend dependencies, since the CLI loads these manually.
+        dependency.reset()
+        cli.DomainConfigUpload.main()
+
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            CONF.identity.default_domain_id)
+        self.assertEqual(default_config, res)
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domains['domain1']['id'])
+        self.assertEqual({}, res)
+        res = self.domain_config_api.get_config_with_sensitive_info(
+            self.domains['domain2']['id'])
+        self.assertEqual({}, res)
+
+    def test_no_overwrite_config(self):
+        # Create a config for the default domain
+        default_config = {
+            'ldap': {'url': uuid.uuid4().hex},
+            'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
+        }
+        self.domain_config_api.create_config(
+            CONF.identity.default_domain_id, default_config)
+
+        # Now try to upload the settings in the configuration file for the
+        # default domain.
+        dependency.reset()
+        with mock.patch('__builtin__.print') as mock_print:
+            self.assertRaises(SystemExit, cli.DomainConfigUpload.main)
+            file_name = ('keystone.%s.conf' %
+                         resource.calc_default_domain()['name'])
+            error_msg = _(
+                'Domain: %(domain)s already has a configuration defined - '
+                'ignoring file: %(file)s.') % {
+                    'domain': resource.calc_default_domain()['name'],
+                    'file': os.path.join(CONF.identity.domain_config_dir,
+                                         file_name)}
+            mock_print.assert_has_calls([mock.call(error_msg)])
+
+        res = self.domain_config_api.get_config(
+            CONF.identity.default_domain_id)
+        # The initial config should not have been overwritten
+        self.assertEqual(default_config, res)
+
+
+class CliDomainConfigNoOptionsTestCase(CliDomainConfigAllTestCase):
+
+    def config(self, config_files):
+        CONF(args=['domain_config_upload'],
+             project='keystone', default_config_files=config_files)
+
+    def test_config_upload(self):
+        dependency.reset()
+        with mock.patch('__builtin__.print') as mock_print:
+            self.assertRaises(SystemExit, cli.DomainConfigUpload.main)
+            mock_print.assert_has_calls(
+                [mock.call(
+                    _('At least one option must be provided, use either '
+                      '--all or --domain-name'))])
+
+
+class CliDomainConfigTooManyOptionsTestCase(CliDomainConfigAllTestCase):
+
+    def config(self, config_files):
+        CONF(args=['domain_config_upload', '--all', '--domain-name',
+                   'Default'],
+             project='keystone', default_config_files=config_files)
+
+    def test_config_upload(self):
+        dependency.reset()
+        with mock.patch('__builtin__.print') as mock_print:
+            self.assertRaises(SystemExit, cli.DomainConfigUpload.main)
+            mock_print.assert_has_calls(
+                [mock.call(_('The --all option cannot be used with '
+                             'the --domain-name option'))])
+
+
+class CliDomainConfigInvalidDomainTestCase(CliDomainConfigAllTestCase):
+
+    def config(self, config_files):
+        self.invalid_domain_name = uuid.uuid4().hex
+        CONF(args=['domain_config_upload', '--domain-name',
+                   self.invalid_domain_name],
+             project='keystone', default_config_files=config_files)
+
+    def test_config_upload(self):
+        dependency.reset()
+        with mock.patch('__builtin__.print') as mock_print:
+            self.assertRaises(SystemExit, cli.DomainConfigUpload.main)
+            file_name = 'keystone.%s.conf' % self.invalid_domain_name
+            error_msg = (_(
+                'Invalid domain name: %(domain)s found in config file name: '
+                '%(file)s - ignoring this file.') % {
+                    'domain': self.invalid_domain_name,
+                    'file': os.path.join(CONF.identity.domain_config_dir,
+                                         file_name)})
+            mock_print.assert_has_calls([mock.call(error_msg)])
diff --git a/keystone-moon/keystone/tests/unit/test_config.py b/keystone-moon/keystone/tests/unit/test_config.py
new file mode 100644 (file)
index 0000000..15cfac8
--- /dev/null
@@ -0,0 +1,84 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import uuid
+
+from oslo_config import cfg
+
+from keystone import config
+from keystone import exception
+from keystone.tests import unit as tests
+
+
+CONF = cfg.CONF
+
+
+class ConfigTestCase(tests.TestCase):
+
+    def config_files(self):
+        config_files = super(ConfigTestCase, self).config_files()
+        # Insert the keystone sample as the first config file to be loaded
+        # since it is used in one of the code paths to determine the paste-ini
+        # location.
+        config_files.insert(0, tests.dirs.etc('keystone.conf.sample'))
+        return config_files
+
+    def test_paste_config(self):
+        self.assertEqual(tests.dirs.etc('keystone-paste.ini'),
+                         config.find_paste_config())
+        self.config_fixture.config(group='paste_deploy',
+                                   config_file=uuid.uuid4().hex)
+        self.assertRaises(exception.ConfigFileNotFound,
+                          config.find_paste_config)
+        self.config_fixture.config(group='paste_deploy', config_file='')
+        self.assertEqual(tests.dirs.etc('keystone.conf.sample'),
+                         config.find_paste_config())
+
+    def test_config_default(self):
+        self.assertEqual('keystone.auth.plugins.password.Password',
+                         CONF.auth.password)
+        self.assertEqual('keystone.auth.plugins.token.Token',
+                         CONF.auth.token)
+
+
+class DeprecatedTestCase(tests.TestCase):
+    """Test using the original (deprecated) name for renamed options."""
+
+    def config_files(self):
+        config_files = super(DeprecatedTestCase, self).config_files()
+        config_files.append(tests.dirs.tests_conf('deprecated.conf'))
+        return config_files
+
+    def test_sql(self):
+        # Options in [sql] were moved to [database] in Icehouse as part of
+        # the change to use oslo-incubator's db.sqlalchemy.sessions.
+
+        self.assertEqual('sqlite://deprecated', CONF.database.connection)
+        self.assertEqual(54321, CONF.database.idle_timeout)
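+
+        # The deprecated.conf fixture presumably contains something like:
+        #
+        #     [sql]
+        #     connection = sqlite://deprecated
+        #     idle_timeout = 54321
+        #
+        # oslo.config maps the old [sql] option names onto [database].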
+
+
+class DeprecatedOverrideTestCase(tests.TestCase):
+    """Test using the deprecated AND new name for renamed options."""
+
+    def config_files(self):
+        config_files = super(DeprecatedOverrideTestCase, self).config_files()
+        config_files.append(tests.dirs.tests_conf('deprecated_override.conf'))
+        return config_files
+
+    def test_sql(self):
+        # Options in [sql] were moved to [database] in Icehouse as part of
+        # the change to use oslo-incubator's db.sqlalchemy.sessions.
+
+        self.assertEqual('sqlite://new', CONF.database.connection)
+        self.assertEqual(65432, CONF.database.idle_timeout)
diff --git a/keystone-moon/keystone/tests/unit/test_contrib_s3_core.py b/keystone-moon/keystone/tests/unit/test_contrib_s3_core.py
new file mode 100644 (file)
index 0000000..43ea1ac
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystone.contrib import s3
+from keystone import exception
+from keystone.tests import unit as tests
+
+
+class S3ContribCore(tests.TestCase):
+    def setUp(self):
+        super(S3ContribCore, self).setUp()
+
+        self.load_backends()
+
+        self.controller = s3.S3Controller()
+
+    def test_good_signature(self):
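+        # The 'token' below is the base64-encoded S3 string-to-sign:
+        #
+        #     PUT
+        #     1B2M2Y8AsgTpgAmY7PhCfg==
+        #     application/octet-stream
+        #     Tue, 11 Dec 2012 21:41:41 GMT
+        #     /cont_s3/uploaded_from_s3.txt
+        #
+        # and 'signature' is the base64-encoded HMAC-SHA1 of that string
+        # under 'secret', per the AWS S3 (v2) signing scheme.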
+        creds_ref = {'secret':
+                     'b121dd41cdcc42fe9f70e572e84295aa'}
+        credentials = {'token':
+                       'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB'
+                       'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM'
+                       'iAyMTo0MTo0MSBHTVQKL2NvbnRfczMvdXBsb2FkZWRfZnJ'
+                       'vbV9zMy50eHQ=',
+                       'signature': 'IL4QLcLVaYgylF9iHj6Wb8BGZsw='}
+
+        self.assertIsNone(self.controller.check_signature(creds_ref,
+                                                          credentials))
+
+    def test_bad_signature(self):
+        creds_ref = {'secret':
+                     'b121dd41cdcc42fe9f70e572e84295aa'}
+        credentials = {'token':
+                       'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB'
+                       'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM'
+                       'iAyMTo0MTo0MSBHTVQKL2NvbnRfczMvdXBsb2FkZWRfZnJ'
+                       'vbV9zMy50eHQ=',
+                       'signature': uuid.uuid4().hex}
+
+        self.assertRaises(exception.Unauthorized,
+                          self.controller.check_signature,
+                          creds_ref, credentials)
diff --git a/keystone-moon/keystone/tests/unit/test_contrib_simple_cert.py b/keystone-moon/keystone/tests/unit/test_contrib_simple_cert.py
new file mode 100644 (file)
index 0000000..8664e2c
--- /dev/null
@@ -0,0 +1,57 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystone.tests.unit import test_v3
+
+
+class BaseTestCase(test_v3.RestfulTestCase):
+
+    EXTENSION_TO_ADD = 'simple_cert_extension'
+
+    CA_PATH = '/v3/OS-SIMPLE-CERT/ca'
+    CERT_PATH = '/v3/OS-SIMPLE-CERT/certificates'
+
+
+class TestSimpleCert(BaseTestCase):
+
+    def request_cert(self, path):
+        content_type = 'application/x-pem-file'
+        response = self.request(app=self.public_app,
+                                method='GET',
+                                path=path,
+                                headers={'Accept': content_type},
+                                expected_status=200)
+
+        self.assertEqual(content_type, response.content_type.lower())
+        self.assertIn('---BEGIN', response.body)
+
+        return response
+
+    def test_ca_cert(self):
+        self.request_cert(self.CA_PATH)
+
+    def test_signing_cert(self):
+        self.request_cert(self.CERT_PATH)
+
+    def test_missing_file(self):
+        # these files do not exist
+        self.config_fixture.config(group='signing',
+                                   ca_certs=uuid.uuid4().hex,
+                                   certfile=uuid.uuid4().hex)
+
+        for path in [self.CA_PATH, self.CERT_PATH]:
+            self.request(app=self.public_app,
+                         method='GET',
+                         path=path,
+                         expected_status=500)
diff --git a/keystone-moon/keystone/tests/unit/test_driver_hints.py b/keystone-moon/keystone/tests/unit/test_driver_hints.py
new file mode 100644 (file)
index 0000000..c20d2ae
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import driver_hints
+from keystone.tests.unit import core as test
+
+
+class ListHintsTests(test.TestCase):
+
+    def test_create_iterate_satisfy(self):
+        hints = driver_hints.Hints()
+        hints.add_filter('t1', 'data1')
+        hints.add_filter('t2', 'data2')
+        self.assertEqual(2, len(hints.filters))
+        filt = hints.get_exact_filter_by_name('t1')
+        self.assertEqual('t1', filt['name'])
+        self.assertEqual('data1', filt['value'])
+        self.assertEqual('equals', filt['comparator'])
+        self.assertEqual(False, filt['case_sensitive'])
+
+        hints.filters.remove(filt)
+        filter_count = 0
+        for filt in hints.filters:
+            filter_count += 1
+            self.assertEqual('t2', filt['name'])
+        self.assertEqual(1, filter_count)
+
+    def test_multiple_creates(self):
+        hints = driver_hints.Hints()
+        hints.add_filter('t1', 'data1')
+        hints.add_filter('t2', 'data2')
+        self.assertEqual(2, len(hints.filters))
+        hints2 = driver_hints.Hints()
+        hints2.add_filter('t4', 'data1')
+        hints2.add_filter('t5', 'data2')
+        self.assertEqual(2, len(hints2.filters))
+
+    def test_limits(self):
+        hints = driver_hints.Hints()
+        self.assertIsNone(hints.limit)
+        hints.set_limit(10)
+        self.assertEqual(10, hints.limit['limit'])
+        self.assertFalse(hints.limit['truncated'])
+        hints.set_limit(11)
+        self.assertEqual(11, hints.limit['limit'])
+        self.assertFalse(hints.limit['truncated'])
+        hints.set_limit(10, truncated=True)
+        self.assertEqual(10, hints.limit['limit'])
+        self.assertTrue(hints.limit['truncated'])
diff --git a/keystone-moon/keystone/tests/unit/test_ec2_token_middleware.py b/keystone-moon/keystone/tests/unit/test_ec2_token_middleware.py
new file mode 100644 (file)
index 0000000..03c95e2
--- /dev/null
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystonemiddleware import ec2_token as ksm_ec2_token
+
+from keystone.middleware import ec2_token
+from keystone.tests import unit as tests
+
+
+class EC2TokenMiddlewareTestBase(tests.BaseTestCase):
+    def test_symbols(self):
+        """Verify ec2 middleware symbols.
+
+        Verify that the keystone version of ec2_token middleware forwards the
+        public symbols from the keystonemiddleware version of the ec2_token
+        middleware for backwards compatibility.
+
+        """
+
+        self.assertIs(ksm_ec2_token.app_factory, ec2_token.app_factory)
+        self.assertIs(ksm_ec2_token.filter_factory, ec2_token.filter_factory)
+        self.assertTrue(
+            issubclass(ec2_token.EC2Token, ksm_ec2_token.EC2Token),
+            'ec2_token.EC2Token is not subclass of '
+            'keystonemiddleware.ec2_token.EC2Token')
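+
+
+# The shim verified above is essentially a re-export; a sketch of the
+# pattern (keystone's actual keystone/middleware/ec2_token.py may differ):
+#
+#     from keystonemiddleware import ec2_token
+#
+#     EC2Token = ec2_token.EC2Token
+#     filter_factory = ec2_token.filter_factory
+#     app_factory = ec2_token.app_factory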
diff --git a/keystone-moon/keystone/tests/unit/test_exception.py b/keystone-moon/keystone/tests/unit/test_exception.py
new file mode 100644 (file)
index 0000000..f91fa2a
--- /dev/null
@@ -0,0 +1,227 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+from oslo_config import fixture as config_fixture
+from oslo_serialization import jsonutils
+import six
+
+from keystone.common import wsgi
+from keystone import exception
+from keystone.tests import unit as tests
+
+
+class ExceptionTestCase(tests.BaseTestCase):
+    def assertValidJsonRendering(self, e):
+        resp = wsgi.render_exception(e)
+        self.assertEqual(e.code, resp.status_int)
+        self.assertEqual('%s %s' % (e.code, e.title), resp.status)
+
+        j = jsonutils.loads(resp.body)
+        self.assertIsNotNone(j.get('error'))
+        self.assertIsNotNone(j['error'].get('code'))
+        self.assertIsNotNone(j['error'].get('title'))
+        self.assertIsNotNone(j['error'].get('message'))
+        self.assertNotIn('\n', j['error']['message'])
+        self.assertNotIn('  ', j['error']['message'])
+        self.assertIsInstance(j['error']['code'], int)
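+        # The rendered body should look like, e.g.:
+        #     {"error": {"code": 404, "title": "Not Found",
+        #                "message": "..."}}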
+
+    def test_all_json_renderings(self):
+        """Everything callable in the exception module should be renderable.
+
+        ... except for the base error class (exception.Error), which is not
+        user-facing.
+
+        This test provides a custom message to bypass docstring parsing, which
+        should be tested separately.
+
+        """
+        for cls in [x for x in exception.__dict__.values() if callable(x)]:
+            if (cls is not exception.Error and isinstance(cls, type) and
+                    issubclass(cls, exception.Error)):
+                self.assertValidJsonRendering(cls(message='Overridden.'))
+
+    def test_validation_error(self):
+        target = uuid.uuid4().hex
+        attribute = uuid.uuid4().hex
+        e = exception.ValidationError(target=target, attribute=attribute)
+        self.assertValidJsonRendering(e)
+        self.assertIn(target, six.text_type(e))
+        self.assertIn(attribute, six.text_type(e))
+
+    def test_not_found(self):
+        target = uuid.uuid4().hex
+        e = exception.NotFound(target=target)
+        self.assertValidJsonRendering(e)
+        self.assertIn(target, six.text_type(e))
+
+    def test_403_title(self):
+        e = exception.Forbidden()
+        resp = wsgi.render_exception(e)
+        j = jsonutils.loads(resp.body)
+        self.assertEqual('Forbidden', e.title)
+        self.assertEqual('Forbidden', j['error'].get('title'))
+
+    def test_unicode_message(self):
+        message = u'Comment \xe7a va'
+        e = exception.Error(message)
+
+        try:
+            self.assertEqual(message, six.text_type(e))
+        except UnicodeEncodeError:
+            self.fail("unicode error message not supported")
+
+    def test_unicode_string(self):
+        e = exception.ValidationError(attribute='xx',
+                                      target='Long \xe2\x80\x93 Dash')
+
+        self.assertIn(u'\u2013', six.text_type(e))
+
+    def test_invalid_unicode_string(self):
+        # NOTE(jamielennox): This is a complete failure case so what is
+        # returned in the exception message is not that important so long
+        # as there is an error with a message
+        e = exception.ValidationError(attribute='xx',
+                                      target='\xe7a va')
+        self.assertIn('%(attribute)', six.text_type(e))
+
+
+class UnexpectedExceptionTestCase(ExceptionTestCase):
+    """Tests if internal info is exposed to the API user on UnexpectedError."""
+
+    class SubClassExc(exception.UnexpectedError):
+        debug_message_format = 'Debug Message: %(debug_info)s'
+
+    def setUp(self):
+        super(UnexpectedExceptionTestCase, self).setUp()
+        self.exc_str = uuid.uuid4().hex
+        self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF))
+
+    def test_unexpected_error_no_debug(self):
+        self.config_fixture.config(debug=False)
+        e = exception.UnexpectedError(exception=self.exc_str)
+        self.assertNotIn(self.exc_str, six.text_type(e))
+
+    def test_unexpected_error_debug(self):
+        self.config_fixture.config(debug=True)
+        e = exception.UnexpectedError(exception=self.exc_str)
+        self.assertIn(self.exc_str, six.text_type(e))
+
+    def test_unexpected_error_subclass_no_debug(self):
+        self.config_fixture.config(debug=False)
+        e = UnexpectedExceptionTestCase.SubClassExc(
+            debug_info=self.exc_str)
+        self.assertEqual(exception.UnexpectedError._message_format,
+                         six.text_type(e))
+
+    def test_unexpected_error_subclass_debug(self):
+        self.config_fixture.config(debug=True)
+        subclass = self.SubClassExc
+
+        e = subclass(debug_info=self.exc_str)
+        expected = subclass.debug_message_format % {'debug_info': self.exc_str}
+        translated_amendment = six.text_type(exception.SecurityError.amendment)
+        self.assertEqual(
+            expected + six.text_type(' ') + translated_amendment,
+            six.text_type(e))
+
+    def test_unexpected_error_custom_message_no_debug(self):
+        self.config_fixture.config(debug=False)
+        e = exception.UnexpectedError(self.exc_str)
+        self.assertEqual(exception.UnexpectedError._message_format,
+                         six.text_type(e))
+
+    def test_unexpected_error_custom_message_debug(self):
+        self.config_fixture.config(debug=True)
+        e = exception.UnexpectedError(self.exc_str)
+        translated_amendment = six.text_type(exception.SecurityError.amendment)
+        self.assertEqual(
+            self.exc_str + six.text_type(' ') + translated_amendment,
+            six.text_type(e))
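+
+
+# A hedged sketch (not keystone code) of the debug-gated message pattern the
+# tests above exercise: a safe, generic message in production, and the
+# detailed message plus the translated amendment only when debug is enabled.
+def _choose_message(debug, safe_message, detailed_message, amendment):
+    if debug:
+        return detailed_message + ' ' + amendment
+    return safe_message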
+
+
+class SecurityErrorTestCase(ExceptionTestCase):
+    """Tests whether security-related info is exposed to the API user."""
+
+    def setUp(self):
+        super(SecurityErrorTestCase, self).setUp()
+        self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF))
+
+    def test_unauthorized_exposure(self):
+        self.config_fixture.config(debug=False)
+
+        risky_info = uuid.uuid4().hex
+        e = exception.Unauthorized(message=risky_info)
+        self.assertValidJsonRendering(e)
+        self.assertNotIn(risky_info, six.text_type(e))
+
+    def test_unauthorized_exposure_in_debug(self):
+        self.config_fixture.config(debug=True)
+
+        risky_info = uuid.uuid4().hex
+        e = exception.Unauthorized(message=risky_info)
+        self.assertValidJsonRendering(e)
+        self.assertIn(risky_info, six.text_type(e))
+
+    def test_forbidden_exposure(self):
+        self.config_fixture.config(debug=False)
+
+        risky_info = uuid.uuid4().hex
+        e = exception.Forbidden(message=risky_info)
+        self.assertValidJsonRendering(e)
+        self.assertNotIn(risky_info, six.text_type(e))
+
+    def test_forbidden_exposure_in_debug(self):
+        self.config_fixture.config(debug=True)
+
+        risky_info = uuid.uuid4().hex
+        e = exception.Forbidden(message=risky_info)
+        self.assertValidJsonRendering(e)
+        self.assertIn(risky_info, six.text_type(e))
+
+    def test_forbidden_action_exposure(self):
+        self.config_fixture.config(debug=False)
+
+        risky_info = uuid.uuid4().hex
+        action = uuid.uuid4().hex
+        e = exception.ForbiddenAction(message=risky_info, action=action)
+        self.assertValidJsonRendering(e)
+        self.assertNotIn(risky_info, six.text_type(e))
+        self.assertIn(action, six.text_type(e))
+
+        e = exception.ForbiddenAction(action=risky_info)
+        self.assertValidJsonRendering(e)
+        self.assertIn(risky_info, six.text_type(e))
+
+    def test_forbidden_action_exposure_in_debug(self):
+        self.config_fixture.config(debug=True)
+
+        risky_info = uuid.uuid4().hex
+
+        e = exception.ForbiddenAction(message=risky_info)
+        self.assertValidJsonRendering(e)
+        self.assertIn(risky_info, six.text_type(e))
+
+        e = exception.ForbiddenAction(action=risky_info)
+        self.assertValidJsonRendering(e)
+        self.assertIn(risky_info, six.text_type(e))
+
+    def test_unicode_argument_message(self):
+        self.config_fixture.config(debug=False)
+
+        risky_info = u'\u7ee7\u7eed\u884c\u7f29\u8fdb\u6216'
+        e = exception.Forbidden(message=risky_info)
+        self.assertValidJsonRendering(e)
+        self.assertNotIn(risky_info, six.text_type(e))
diff --git a/keystone-moon/keystone/tests/unit/test_hacking_checks.py b/keystone-moon/keystone/tests/unit/test_hacking_checks.py
new file mode 100644 (file)
index 0000000..b9b047b
--- /dev/null
@@ -0,0 +1,143 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import textwrap
+
+import mock
+import pep8
+import testtools
+
+from keystone.hacking import checks
+from keystone.tests.unit.ksfixtures import hacking as hacking_fixtures
+
+
+class BaseStyleCheck(testtools.TestCase):
+
+    def setUp(self):
+        super(BaseStyleCheck, self).setUp()
+        self.code_ex = self.useFixture(self.get_fixture())
+        self.addCleanup(delattr, self, 'code_ex')
+
+    def get_checker(self):
+        """Returns the checker to be used for tests in this class."""
+        raise NotImplemented('subclasses must provide a real implementation')
+
+    def get_fixture(self):
+        return hacking_fixtures.HackingCode()
+
+    # We are patching pep8 so that only the check under test is actually
+    # installed.
+    @mock.patch('pep8._checks',
+                {'physical_line': {}, 'logical_line': {}, 'tree': {}})
+    def run_check(self, code):
+        pep8.register_check(self.get_checker())
+
+        lines = textwrap.dedent(code).strip().splitlines(True)
+
+        checker = pep8.Checker(lines=lines)
+        checker.check_all()
+        checker.report._deferred_print.sort()
+        return checker.report._deferred_print
+
+    def assert_has_errors(self, code, expected_errors=None):
+        actual_errors = [e[:3] for e in self.run_check(code)]
+        self.assertEqual(expected_errors or [], actual_errors)
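+
+
+# For illustration only: the shape of logical-line check that
+# pep8.register_check() accepts. K000 is a made-up code, not a real
+# keystone hacking check.
+def no_bare_todo(logical_line):
+    """K000: use a bug reference instead of a bare TODO."""
+    pos = logical_line.find('TODO')
+    if pos >= 0:
+        yield pos, 'K000 use a bug reference instead of a bare TODO'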
+
+
+class TestCheckForMutableDefaultArgs(BaseStyleCheck):
+
+    def get_checker(self):
+        return checks.CheckForMutableDefaultArgs
+
+    def test(self):
+        code = self.code_ex.mutable_default_args['code']
+        errors = self.code_ex.mutable_default_args['expected_errors']
+        self.assert_has_errors(code, expected_errors=errors)
+
+
+class TestBlockCommentsBeginWithASpace(BaseStyleCheck):
+
+    def get_checker(self):
+        return checks.block_comments_begin_with_a_space
+
+    def test(self):
+        code = self.code_ex.comments_begin_with_space['code']
+        errors = self.code_ex.comments_begin_with_space['expected_errors']
+        self.assert_has_errors(code, expected_errors=errors)
+
+
+class TestAssertingNoneEquality(BaseStyleCheck):
+
+    def get_checker(self):
+        return checks.CheckForAssertingNoneEquality
+
+    def test(self):
+        code = self.code_ex.asserting_none_equality['code']
+        errors = self.code_ex.asserting_none_equality['expected_errors']
+        self.assert_has_errors(code, expected_errors=errors)
+
+
+class TestCheckForDebugLoggingIssues(BaseStyleCheck):
+
+    def get_checker(self):
+        return checks.CheckForLoggingIssues
+
+    def test_for_translations(self):
+        fixture = self.code_ex.assert_no_translations_for_debug_logging
+        code = fixture['code']
+        errors = fixture['expected_errors']
+        self.assert_has_errors(code, expected_errors=errors)
+
+
+class TestCheckForNonDebugLoggingIssues(BaseStyleCheck):
+
+    def get_checker(self):
+        return checks.CheckForLoggingIssues
+
+    def get_fixture(self):
+        return hacking_fixtures.HackingLogging()
+
+    def test_for_translations(self):
+        for example in self.code_ex.examples:
+            code = self.code_ex.shared_imports + example['code']
+            errors = example['expected_errors']
+            self.assert_has_errors(code, expected_errors=errors)
+
+    def assert_has_errors(self, code, expected_errors=None):
+        # pull out the parts of the error that we'll match against
+        actual_errors = (e[:3] for e in self.run_check(code))
+        # Adjust line numbers to make the fixture data more readable.
+        import_lines = len(self.code_ex.shared_imports.split('\n')) - 1
+        actual_errors = [(e[0] - import_lines, e[1], e[2])
+                         for e in actual_errors]
+        self.assertEqual(expected_errors or [], actual_errors)
+
+
+class TestCheckOsloNamespaceImports(BaseStyleCheck):
+    def get_checker(self):
+        return checks.check_oslo_namespace_imports
+
+    def test(self):
+        code = self.code_ex.oslo_namespace_imports['code']
+        errors = self.code_ex.oslo_namespace_imports['expected_errors']
+        self.assert_has_errors(code, expected_errors=errors)
+
+
+class TestDictConstructorWithSequenceCopy(BaseStyleCheck):
+
+    def get_checker(self):
+        return checks.dict_constructor_with_sequence_copy
+
+    def test(self):
+        code = self.code_ex.dict_constructor['code']
+        errors = self.code_ex.dict_constructor['expected_errors']
+        self.assert_has_errors(code, expected_errors=errors)
diff --git a/keystone-moon/keystone/tests/unit/test_ipv6.py b/keystone-moon/keystone/tests/unit/test_ipv6.py
new file mode 100644 (file)
index 0000000..e3d467f
--- /dev/null
@@ -0,0 +1,51 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_config import cfg
+
+from keystone.common import environment
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import appserver
+
+
+CONF = cfg.CONF
+
+
+class IPv6TestCase(tests.TestCase):
+
+    def setUp(self):
+        self.skip_if_no_ipv6()
+        super(IPv6TestCase, self).setUp()
+        self.load_backends()
+
+    def test_ipv6_ok(self):
+        """Make sure both public and admin API work with ipv6."""
+        paste_conf = self._paste_config('keystone')
+
+        # Verify Admin
+        with appserver.AppServer(paste_conf, appserver.ADMIN, host="::1"):
+            conn = environment.httplib.HTTPConnection(
+                '::1', CONF.eventlet_server.admin_port)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
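+            # A 300 (Multiple Choices) response is keystone's version
+            # discovery answer for '/', so it doubles as a liveness check.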
+
+        # Verify Public
+        with appserver.AppServer(paste_conf, appserver.MAIN, host="::1"):
+            conn = environment.httplib.HTTPConnection(
+                '::1', CONF.eventlet_server.public_port)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
diff --git a/keystone-moon/keystone/tests/unit/test_kvs.py b/keystone-moon/keystone/tests/unit/test_kvs.py
new file mode 100644 (file)
index 0000000..4d80ea3
--- /dev/null
@@ -0,0 +1,581 @@
+# Copyright 2013 Metacloud, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+import uuid
+
+from dogpile.cache import api
+from dogpile.cache import proxy
+from dogpile.cache import util
+import mock
+import six
+from testtools import matchers
+
+from keystone.common.kvs.backends import inmemdb
+from keystone.common.kvs.backends import memcached
+from keystone.common.kvs import core
+from keystone import exception
+from keystone.tests import unit as tests
+
+NO_VALUE = api.NO_VALUE
+
+
+class MutexFixture(object):
+    def __init__(self, storage_dict, key, timeout):
+        self.database = storage_dict
+        self.key = '_lock' + key
+
+    def acquire(self, wait=True):
+        while True:
+            try:
+                self.database[self.key] = 1
+                return True
+            except KeyError:
+                return False
+
+    def release(self):
+        self.database.pop(self.key, None)
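+
+# NOTE: the fixture above only works because KVSBackendFixture's
+# InmemTestDB (defined below) raises KeyError on duplicate keys, which
+# turns a plain dict assignment into a test-and-set primitive.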
+
+
+class KVSBackendFixture(inmemdb.MemoryBackend):
+    def __init__(self, arguments):
+        class InmemTestDB(dict):
+            def __setitem__(self, key, value):
+                if key in self:
+                    raise KeyError('Key %s already exists' % key)
+                super(InmemTestDB, self).__setitem__(key, value)
+
+        self._db = InmemTestDB()
+        self.lock_timeout = arguments.pop('lock_timeout', 5)
+        self.test_arg = arguments.pop('test_arg', None)
+
+    def get_mutex(self, key):
+        return MutexFixture(self._db, key, self.lock_timeout)
+
+    @classmethod
+    def key_mangler(cls, key):
+        return 'KVSBackend_' + key
+
+
+class KVSBackendForcedKeyMangleFixture(KVSBackendFixture):
+    use_backend_key_mangler = True
+
+    @classmethod
+    def key_mangler(cls, key):
+        return 'KVSBackendForcedKeyMangle_' + key
+
+
+class RegionProxyFixture(proxy.ProxyBackend):
+    """A test dogpile.cache proxy that does nothing."""
+
+
+class RegionProxy2Fixture(proxy.ProxyBackend):
+    """A test dogpile.cache proxy that does nothing."""
+
+
+class TestMemcacheDriver(api.CacheBackend):
+    """A test dogpile.cache backend that conforms to the mixin-mechanism for
+    overriding set and set_multi methods on dogpile memcached drivers.
+    """
+    class test_client(object):
+        # FIXME(morganfainberg): Convert this test client over to using mock
+        # and/or mock.MagicMock as appropriate
+
+        def __init__(self):
+            self.__name__ = 'TestingMemcacheDriverClientObject'
+            self.set_arguments_passed = None
+            self.keys_values = {}
+            self.lock_set_time = None
+            self.lock_expiry = None
+
+        def set(self, key, value, **set_arguments):
+            self.keys_values.clear()
+            self.keys_values[key] = value
+            self.set_arguments_passed = set_arguments
+
+        def set_multi(self, mapping, **set_arguments):
+            self.keys_values.clear()
+            self.keys_values = mapping
+            self.set_arguments_passed = set_arguments
+
+        def add(self, key, value, expiry_time):
+            # NOTE(morganfainberg): `add` is used in this case for the
+            # memcache lock testing. If further testing is required around the
+            # actual memcache `add` interface, this method should be
+            # expanded to work more like the actual memcache `add` function
+            if self.lock_expiry is not None and self.lock_set_time is not None:
+                if time.time() - self.lock_set_time < self.lock_expiry:
+                    return False
+            self.lock_expiry = expiry_time
+            self.lock_set_time = time.time()
+            return True
+
+        def delete(self, key):
+            # NOTE(morganfainberg): `delete` is used in this case for the
+            # memcache lock testing. If further testing is required around the
+            # actual memcache `delete` interface, this method should be
+            # expanded to work more like the actual memcache `delete` function.
+            self.lock_expiry = None
+            self.lock_set_time = None
+            return True
+
+    def __init__(self, arguments):
+        self.client = self.test_client()
+        self.set_arguments = {}
+        # NOTE(morganfainberg): This is the same logic as the dogpile backend
+        # since we need to mirror that functionality for the `set_argument`
+        # values to appear on the actual backend.
+        if 'memcached_expire_time' in arguments:
+            self.set_arguments['time'] = arguments['memcached_expire_time']
+
+    def set(self, key, value):
+        self.client.set(key, value, **self.set_arguments)
+
+    def set_multi(self, mapping):
+        self.client.set_multi(mapping, **self.set_arguments)
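+
+
+# Usage sketch: once registered under VALID_DOGPILE_BACKENDS (as KVSTest's
+# setUp does below), the driver is selected purely by name:
+#
+#     kvs = core.get_key_value_store('example-region')
+#     kvs.configure('openstack.kvs.Memcached',
+#                   memcached_backend='TestDriver',
+#                   memcached_expire_time=600)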
+
+
+class KVSTest(tests.TestCase):
+    def setUp(self):
+        super(KVSTest, self).setUp()
+        self.key_foo = 'foo_' + uuid.uuid4().hex
+        self.value_foo = uuid.uuid4().hex
+        self.key_bar = 'bar_' + uuid.uuid4().hex
+        self.value_bar = {'complex_data_structure': uuid.uuid4().hex}
+        self.addCleanup(memcached.VALID_DOGPILE_BACKENDS.pop,
+                        'TestDriver',
+                        None)
+        memcached.VALID_DOGPILE_BACKENDS['TestDriver'] = TestMemcacheDriver
+
+    def _get_kvs_region(self, name=None):
+        if name is None:
+            name = uuid.uuid4().hex
+        return core.get_key_value_store(name)
+
+    def test_kvs_basic_configuration(self):
+        # Test that the most basic configuration options pass through to the
+        # backend.
+        region_one = uuid.uuid4().hex
+        region_two = uuid.uuid4().hex
+        test_arg = 100
+        kvs = self._get_kvs_region(region_one)
+        kvs.configure('openstack.kvs.Memory')
+
+        self.assertIsInstance(kvs._region.backend, inmemdb.MemoryBackend)
+        self.assertEqual(region_one, kvs._region.name)
+
+        kvs = self._get_kvs_region(region_two)
+        kvs.configure('openstack.kvs.KVSBackendFixture',
+                      test_arg=test_arg)
+
+        self.assertEqual(region_two, kvs._region.name)
+        self.assertEqual(test_arg, kvs._region.backend.test_arg)
+
+    def test_kvs_proxy_configuration(self):
+        # Test that proxies are applied correctly and in the correct (reverse)
+        # order to the kvs region.
+        kvs = self._get_kvs_region()
+        kvs.configure(
+            'openstack.kvs.Memory',
+            proxy_list=['keystone.tests.unit.test_kvs.RegionProxyFixture',
+                        'keystone.tests.unit.test_kvs.RegionProxy2Fixture'])
+
+        self.assertIsInstance(kvs._region.backend, RegionProxyFixture)
+        self.assertIsInstance(kvs._region.backend.proxied, RegionProxy2Fixture)
+        self.assertIsInstance(kvs._region.backend.proxied.proxied,
+                              inmemdb.MemoryBackend)
+
+    def test_kvs_key_mangler_fallthrough_default(self):
+        # Test to make sure we default to the standard dogpile sha1 hashing
+        # key_mangler
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memory')
+
+        self.assertIs(kvs._region.key_mangler, util.sha1_mangle_key)
+        # The backend should also have the keymangler set the same as the
+        # region now.
+        self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
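+        # dogpile's sha1_mangle_key hashes keys to fixed-width hex, e.g.
+        # sha1(b'foo') -> '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'.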
+
+    def test_kvs_key_mangler_configuration_backend(self):
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.KVSBackendFixture')
+        expected = KVSBackendFixture.key_mangler(self.key_foo)
+        self.assertEqual(expected, kvs._region.key_mangler(self.key_foo))
+
+    def test_kvs_key_mangler_configuration_forced_backend(self):
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.KVSBackendForcedKeyMangleFixture',
+                      key_mangler=util.sha1_mangle_key)
+        expected = KVSBackendForcedKeyMangleFixture.key_mangler(self.key_foo)
+        self.assertEqual(expected, kvs._region.key_mangler(self.key_foo))
+
+    def test_kvs_key_mangler_configuration_disabled(self):
+        # Test that no key_mangler is set if enable_key_mangler is false
+        self.config_fixture.config(group='kvs', enable_key_mangler=False)
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memory')
+
+        self.assertIsNone(kvs._region.key_mangler)
+        self.assertIsNone(kvs._region.backend.key_mangler)
+
+    def test_kvs_key_mangler_set_on_backend(self):
+        def test_key_mangler(key):
+            return key
+
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memory')
+        self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+        kvs._set_key_mangler(test_key_mangler)
+        self.assertIs(kvs._region.backend.key_mangler, test_key_mangler)
+
+    def test_kvs_basic_get_set_delete(self):
+        # Test the basic get/set/delete actions on the KVS region
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memory')
+
+        # Not found should be raised if the key doesn't exist
+        self.assertRaises(exception.NotFound, kvs.get, key=self.key_bar)
+        kvs.set(self.key_bar, self.value_bar)
+        returned_value = kvs.get(self.key_bar)
+        # The returned value should be the same value as the value in .set
+        self.assertEqual(self.value_bar, returned_value)
+        # The value should not be the exact object used in .set
+        self.assertIsNot(returned_value, self.value_bar)
+        kvs.delete(self.key_bar)
+        # Second delete should raise NotFound
+        self.assertRaises(exception.NotFound, kvs.delete, key=self.key_bar)
+
+    def _kvs_multi_get_set_delete(self, kvs):
+        keys = [self.key_foo, self.key_bar]
+        expected = [self.value_foo, self.value_bar]
+
+        kvs.set_multi({self.key_foo: self.value_foo,
+                       self.key_bar: self.value_bar})
+        # Returned value from get_multi should be a list of the values of the
+        # keys
+        self.assertEqual(expected, kvs.get_multi(keys))
+        # Delete both keys
+        kvs.delete_multi(keys)
+        # Make sure that NotFound is properly raised when trying to get the
+        # now-deleted keys
+        self.assertRaises(exception.NotFound, kvs.get_multi, keys=keys)
+        self.assertRaises(exception.NotFound, kvs.get, key=self.key_foo)
+        self.assertRaises(exception.NotFound, kvs.get, key=self.key_bar)
+        # Make sure get_multi raises NotFound if one of the keys isn't found
+        kvs.set(self.key_foo, self.value_foo)
+        self.assertRaises(exception.NotFound, kvs.get_multi, keys=keys)
+
+    def test_kvs_multi_get_set_delete(self):
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memory')
+
+        self._kvs_multi_get_set_delete(kvs)
+
+    def test_kvs_locking_context_handler(self):
+        # Make sure we're creating the correct key/value pairs for the backend
+        # distributed locking mutex.
+        self.config_fixture.config(group='kvs', enable_key_mangler=False)
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.KVSBackendFixture')
+
+        lock_key = '_lock' + self.key_foo
+        self.assertNotIn(lock_key, kvs._region.backend._db)
+        with core.KeyValueStoreLock(kvs._mutex(self.key_foo), self.key_foo):
+            self.assertIn(lock_key, kvs._region.backend._db)
+            self.assertEqual(1, kvs._region.backend._db[lock_key])
+
+        self.assertNotIn(lock_key, kvs._region.backend._db)
+
+    def test_kvs_locking_context_handler_locking_disabled(self):
+        # Make sure no creation of key/value pairs for the backend
+        # distributed locking mutex occurs if locking is disabled.
+        self.config_fixture.config(group='kvs', enable_key_mangler=False)
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.KVSBackendFixture', locking=False)
+        lock_key = '_lock' + self.key_foo
+        self.assertNotIn(lock_key, kvs._region.backend._db)
+        with core.KeyValueStoreLock(kvs._mutex(self.key_foo), self.key_foo,
+                                    False):
+            self.assertNotIn(lock_key, kvs._region.backend._db)
+
+        self.assertNotIn(lock_key, kvs._region.backend._db)
+
+    def test_kvs_with_lock_action_context_manager_timeout(self):
+        kvs = self._get_kvs_region()
+        lock_timeout = 5
+        kvs.configure('openstack.kvs.Memory', lock_timeout=lock_timeout)
+
+        def do_with_lock_action_timeout(kvs_region, key, offset):
+            with kvs_region.get_lock(key) as lock_in_use:
+                self.assertTrue(lock_in_use.active)
+                # Subtract the offset from the acquire_time.  If this puts the
+                # acquire_time difference from time.time() at >= lock_timeout
+                # this should raise a LockTimeout exception.  This is because
+                # there is a built-in 1-second overlap where the context
+                # manager thinks the lock is expired but the lock is still
+                # active.  This is to help mitigate race conditions on the
+                # time-check itself.
+                lock_in_use.acquire_time -= offset
+                with kvs_region._action_with_lock(key, lock_in_use):
+                    pass
+
+        # This should succeed, we are not timed-out here.
+        do_with_lock_action_timeout(kvs, key=uuid.uuid4().hex, offset=2)
+        # Try it now with an offset equal to the lock_timeout
+        self.assertRaises(core.LockTimeout,
+                          do_with_lock_action_timeout,
+                          kvs_region=kvs,
+                          key=uuid.uuid4().hex,
+                          offset=lock_timeout)
+        # Final test with offset significantly greater than the lock_timeout
+        self.assertRaises(core.LockTimeout,
+                          do_with_lock_action_timeout,
+                          kvs_region=kvs,
+                          key=uuid.uuid4().hex,
+                          offset=100)
+
+    def test_kvs_with_lock_action_mismatched_keys(self):
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memory')
+
+        def do_with_lock_action(kvs_region, lock_key, target_key):
+            with kvs_region.get_lock(lock_key) as lock_in_use:
+                self.assertTrue(lock_in_use.active)
+                with kvs_region._action_with_lock(target_key, lock_in_use):
+                    pass
+
+        # Ensure we raise a ValueError if the lock key mismatches from the
+        # target key.
+        self.assertRaises(ValueError,
+                          do_with_lock_action,
+                          kvs_region=kvs,
+                          lock_key=self.key_foo,
+                          target_key=self.key_bar)
+
+    def test_kvs_with_lock_action_context_manager(self):
+        # Make sure we're creating the correct key/value pairs for the backend
+        # distributed locking mutex.
+        self.config_fixture.config(group='kvs', enable_key_mangler=False)
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.KVSBackendFixture')
+
+        lock_key = '_lock' + self.key_foo
+        self.assertNotIn(lock_key, kvs._region.backend._db)
+        with kvs.get_lock(self.key_foo) as lock:
+            with kvs._action_with_lock(self.key_foo, lock):
+                self.assertTrue(lock.active)
+                self.assertIn(lock_key, kvs._region.backend._db)
+                self.assertEqual(1, kvs._region.backend._db[lock_key])
+
+        self.assertNotIn(lock_key, kvs._region.backend._db)
+
+    def test_kvs_with_lock_action_context_manager_no_lock(self):
+        # Make sure we're not locking unless an actual lock is passed into the
+        # context manager
+        self.config_fixture.config(group='kvs', enable_key_mangler=False)
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.KVSBackendFixture')
+
+        lock_key = '_lock' + self.key_foo
+        lock = None
+        self.assertNotIn(lock_key, kvs._region.backend._db)
+        with kvs._action_with_lock(self.key_foo, lock):
+            self.assertNotIn(lock_key, kvs._region.backend._db)
+
+        self.assertNotIn(lock_key, kvs._region.backend._db)
+
+    def test_kvs_backend_registration_does_not_reregister_backends(self):
+        # setUp registers the test backends. Running this again would raise
+        # an exception if re-registration of the backends occurred.
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memory')
+        core._register_backends()
+
+    def test_kvs_memcached_manager_valid_dogpile_memcached_backend(self):
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memcached',
+                      memcached_backend='TestDriver')
+        self.assertIsInstance(kvs._region.backend.driver,
+                              TestMemcacheDriver)
+
+    def test_kvs_memcached_manager_invalid_dogpile_memcached_backend(self):
+        # Invalid dogpile memcache backend should raise ValueError
+        kvs = self._get_kvs_region()
+        self.assertRaises(ValueError,
+                          kvs.configure,
+                          backing_store='openstack.kvs.Memcached',
+                          memcached_backend=uuid.uuid4().hex)
+
+    def test_kvs_memcache_manager_no_expiry_keys(self):
+        # Make sure the memcache backend recalculates the no-expiry keys
+        # correctly when a key-mangler is set on it.
+
+        def new_mangler(key):
+            return '_mangled_key_' + key
+
+        kvs = self._get_kvs_region()
+        no_expiry_keys = set(['test_key'])
+        kvs.configure('openstack.kvs.Memcached',
+                      memcached_backend='TestDriver',
+                      no_expiry_keys=no_expiry_keys)
+        calculated_keys = set([kvs._region.key_mangler(key)
+                               for key in no_expiry_keys])
+        self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+        self.assertSetEqual(calculated_keys,
+                            kvs._region.backend.no_expiry_hashed_keys)
+        self.assertSetEqual(no_expiry_keys,
+                            kvs._region.backend.raw_no_expiry_keys)
+        calculated_keys = set([new_mangler(key) for key in no_expiry_keys])
+        kvs._region.backend.key_mangler = new_mangler
+        self.assertSetEqual(calculated_keys,
+                            kvs._region.backend.no_expiry_hashed_keys)
+        self.assertSetEqual(no_expiry_keys,
+                            kvs._region.backend.raw_no_expiry_keys)
+
+    def test_kvs_memcache_key_mangler_set_to_none(self):
+        kvs = self._get_kvs_region()
+        no_expiry_keys = set(['test_key'])
+        kvs.configure('openstack.kvs.Memcached',
+                      memcached_backend='TestDriver',
+                      no_expiry_keys=no_expiry_keys)
+        self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+        kvs._region.backend.key_mangler = None
+        self.assertSetEqual(kvs._region.backend.raw_no_expiry_keys,
+                            kvs._region.backend.no_expiry_hashed_keys)
+        self.assertIsNone(kvs._region.backend.key_mangler)
+
+    def test_noncallable_key_mangler_set_on_driver_raises_type_error(self):
+        kvs = self._get_kvs_region()
+        kvs.configure('openstack.kvs.Memcached',
+                      memcached_backend='TestDriver')
+        self.assertRaises(TypeError,
+                          setattr,
+                          kvs._region.backend,
+                          'key_mangler',
+                          'Non-Callable')
+
+    def test_kvs_memcache_set_arguments_and_memcache_expires_ttl(self):
+        # Test the "set_arguments" (arguments passed on all set calls) logic
+        # and the no-expiry-key modifications of set_arguments for the explicit
+        # memcache TTL.
+        self.config_fixture.config(group='kvs', enable_key_mangler=False)
+        kvs = self._get_kvs_region()
+        memcache_expire_time = 86400
+
+        expected_set_args = {'time': memcache_expire_time}
+        expected_no_expiry_args = {}
+
+        expected_foo_keys = [self.key_foo]
+        expected_bar_keys = [self.key_bar]
+
+        mapping_foo = {self.key_foo: self.value_foo}
+        mapping_bar = {self.key_bar: self.value_bar}
+
+        kvs.configure(backing_store='openstack.kvs.Memcached',
+                      memcached_backend='TestDriver',
+                      memcached_expire_time=memcache_expire_time,
+                      some_other_arg=uuid.uuid4().hex,
+                      no_expiry_keys=[self.key_bar])
+        # Ensure the set_arguments are correct
+        self.assertDictEqual(
+            kvs._region.backend._get_set_arguments_driver_attr(),
+            expected_set_args)
+
+        # Set a key that would have an expiry and verify the correct result
+        # occurred and that the correct set_arguments were passed.
+        kvs.set(self.key_foo, self.value_foo)
+        self.assertDictEqual(
+            kvs._region.backend.driver.client.set_arguments_passed,
+            expected_set_args)
+        self.assertEqual(expected_foo_keys,
+                         kvs._region.backend.driver.client.keys_values.keys())
+        self.assertEqual(
+            self.value_foo,
+            kvs._region.backend.driver.client.keys_values[self.key_foo][0])
+
+        # Set a key that would not have an expiry and verify the correct result
+        # occurred and that the correct set_arguments were passed.
+        kvs.set(self.key_bar, self.value_bar)
+        self.assertDictEqual(
+            kvs._region.backend.driver.client.set_arguments_passed,
+            expected_no_expiry_args)
+        self.assertEqual(expected_bar_keys,
+                         kvs._region.backend.driver.client.keys_values.keys())
+        self.assertEqual(
+            self.value_bar,
+            kvs._region.backend.driver.client.keys_values[self.key_bar][0])
+
+        # set_multi a dict that would have an expiry and verify the correct
+        # result occurred and that the correct set_arguments were passed.
+        kvs.set_multi(mapping_foo)
+        self.assertDictEqual(
+            kvs._region.backend.driver.client.set_arguments_passed,
+            expected_set_args)
+        self.assertEqual(expected_foo_keys,
+                         kvs._region.backend.driver.client.keys_values.keys())
+        self.assertEqual(
+            self.value_foo,
+            kvs._region.backend.driver.client.keys_values[self.key_foo][0])
+
+        # set_multi a dict that would not have an expiry and verify the correct
+        # result occurred and that the correct set_arguments were passed.
+        kvs.set_multi(mapping_bar)
+        self.assertDictEqual(
+            kvs._region.backend.driver.client.set_arguments_passed,
+            expected_no_expiry_args)
+        self.assertEqual(expected_bar_keys,
+                         kvs._region.backend.driver.client.keys_values.keys())
+        self.assertEqual(
+            self.value_bar,
+            kvs._region.backend.driver.client.keys_values[self.key_bar][0])
+
+    def test_memcached_lock_max_lock_attempts(self):
+        kvs = self._get_kvs_region()
+        max_lock_attempts = 1
+        test_key = uuid.uuid4().hex
+
+        kvs.configure(backing_store='openstack.kvs.Memcached',
+                      memcached_backend='TestDriver',
+                      max_lock_attempts=max_lock_attempts)
+
+        self.assertEqual(max_lock_attempts,
+                         kvs._region.backend.max_lock_attempts)
+        # Simple Lock success test
+        with kvs.get_lock(test_key) as lock:
+            kvs.set(test_key, 'testing', lock)
+
+        def lock_within_a_lock(key):
+            with kvs.get_lock(key) as first_lock:
+                kvs.set(test_key, 'lock', first_lock)
+                with kvs.get_lock(key) as second_lock:
+                    kvs.set(key, 'lock-within-a-lock', second_lock)
+
+        self.assertRaises(exception.UnexpectedError,
+                          lock_within_a_lock,
+                          key=test_key)
+
+
+class TestMemcachedBackend(tests.TestCase):
+
+    @mock.patch('keystone.common.kvs.backends.memcached._', six.text_type)
+    def test_invalid_backend_fails_initialization(self):
+        raises_valueerror = matchers.Raises(matchers.MatchesException(
+            ValueError, r'.*FakeBackend.*'))
+
+        options = {
+            'url': 'needed to get to the focus of this test (the backend)',
+            'memcached_backend': 'FakeBackend',
+        }
+        self.assertThat(lambda: memcached.MemcachedBackend(options),
+                        raises_valueerror)
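+
+
+# End-to-end sketch of the KVS API exercised above (the region name and
+# key are illustrative):
+#
+#     kvs = core.get_key_value_store('example')
+#     kvs.configure('openstack.kvs.Memory')
+#     kvs.set('key', {'value': 1})
+#     with kvs.get_lock('key') as lock:
+#         kvs.set('key', {'value': 2}, lock)
+#     kvs.delete('key')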
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_livetest.py
new file mode 100644 (file)
index 0000000..5b44936
--- /dev/null
@@ -0,0 +1,229 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import subprocess
+import uuid
+
+import ldap
+import ldap.modlist
+from oslo_config import cfg
+
+from keystone import exception
+from keystone.identity.backends import ldap as identity_ldap
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_backend_ldap
+
+
+CONF = cfg.CONF
+
+
+def create_object(dn, attrs):
+    conn = ldap.initialize(CONF.ldap.url)
+    conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password)
+    ldif = ldap.modlist.addModlist(attrs)
+    conn.add_s(dn, ldif)
+    conn.unbind_s()
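+
+# Roughly equivalent to:  ldapadd -x -H <url> -D <user> -w <password>
+# fed an LDIF built from `attrs` (addModlist does the dict-to-modlist
+# conversion).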
+
+
+class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
+
+    def setUp(self):
+        self._ldap_skip_live()
+        super(LiveLDAPIdentity, self).setUp()
+
+    def _ldap_skip_live(self):
+        self.skip_if_env_not_set('ENABLE_LDAP_LIVE_TEST')
+
+    def clear_database(self):
+        devnull = open('/dev/null', 'w')
+        subprocess.call(['ldapdelete',
+                         '-x',
+                         '-D', CONF.ldap.user,
+                         '-H', CONF.ldap.url,
+                         '-w', CONF.ldap.password,
+                         '-r', CONF.ldap.suffix],
+                        stderr=devnull)
+
+        if CONF.ldap.suffix.startswith('ou='):
+            tree_dn_attrs = {'objectclass': 'organizationalUnit',
+                             'ou': 'openstack'}
+        else:
+            tree_dn_attrs = {'objectclass': ['dcObject', 'organizationalUnit'],
+                             'dc': 'openstack',
+                             'ou': 'openstack'}
+        create_object(CONF.ldap.suffix, tree_dn_attrs)
+        create_object(CONF.ldap.user_tree_dn,
+                      {'objectclass': 'organizationalUnit',
+                       'ou': 'Users'})
+        create_object(CONF.ldap.role_tree_dn,
+                      {'objectclass': 'organizationalUnit',
+                       'ou': 'Roles'})
+        create_object(CONF.ldap.project_tree_dn,
+                      {'objectclass': 'organizationalUnit',
+                       'ou': 'Projects'})
+        create_object(CONF.ldap.group_tree_dn,
+                      {'objectclass': 'organizationalUnit',
+                       'ou': 'UserGroups'})
+
+    def config_files(self):
+        config_files = super(LiveLDAPIdentity, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_liveldap.conf'))
+        return config_files
+
+    def config_overrides(self):
+        super(LiveLDAPIdentity, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+
+    def test_build_tree(self):
+        """Regression test for building the tree names
+        """
+        # logic is different from the fake backend.
+        user_api = identity_ldap.UserApi(CONF)
+        self.assertTrue(user_api)
+        self.assertEqual(user_api.tree_dn, CONF.ldap.user_tree_dn)
+
+    def tearDown(self):
+        tests.TestCase.tearDown(self)
+
+    def test_ldap_dereferencing(self):
+        alt_users_ldif = {'objectclass': ['top', 'organizationalUnit'],
+                          'ou': 'alt_users'}
+        alt_fake_user_ldif = {'objectclass': ['person', 'inetOrgPerson'],
+                              'cn': 'alt_fake1',
+                              'sn': 'alt_fake1'}
+        aliased_users_ldif = {'objectclass': ['alias', 'extensibleObject'],
+                              'aliasedobjectname': "ou=alt_users,%s" %
+                              CONF.ldap.suffix}
+        create_object("ou=alt_users,%s" % CONF.ldap.suffix, alt_users_ldif)
+        create_object("%s=alt_fake1,ou=alt_users,%s" %
+                      (CONF.ldap.user_id_attribute, CONF.ldap.suffix),
+                      alt_fake_user_ldif)
+        create_object("ou=alt_users,%s" % CONF.ldap.user_tree_dn,
+                      aliased_users_ldif)
+
+        self.config_fixture.config(group='ldap',
+                                   query_scope='sub',
+                                   alias_dereferencing='never')
+        self.identity_api = identity_ldap.Identity()
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          'alt_fake1')
+
+        self.config_fixture.config(group='ldap',
+                                   alias_dereferencing='searching')
+        self.identity_api = identity_ldap.Identity()
+        user_ref = self.identity_api.get_user('alt_fake1')
+        self.assertEqual('alt_fake1', user_ref['id'])
+
+        self.config_fixture.config(group='ldap', alias_dereferencing='always')
+        self.identity_api = identity_ldap.Identity()
+        user_ref = self.identity_api.get_user('alt_fake1')
+        self.assertEqual('alt_fake1', user_ref['id'])
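+
+        # The three passes above mirror the LDAP `deref` client setting:
+        # 'never' hides the aliased entry, while 'searching' and 'always'
+        # both dereference it during the subtree search.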
+
+    # FakeLDAP does not correctly process filters, so this test can only be
+    # run against a live LDAP server
+    def test_list_groups_for_user_filtered(self):
+        domain = self._get_domain_fixture()
+        test_groups = []
+        test_users = []
+        GROUP_COUNT = 3
+        USER_COUNT = 2
+
+        for x in range(0, USER_COUNT):
+            new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
+                        'enabled': True, 'domain_id': domain['id']}
+            new_user = self.identity_api.create_user(new_user)
+            test_users.append(new_user)
+        positive_user = test_users[0]
+        negative_user = test_users[1]
+
+        for x in range(0, USER_COUNT):
+            group_refs = self.identity_api.list_groups_for_user(
+                test_users[x]['id'])
+            self.assertEqual(0, len(group_refs))
+
+        for x in range(0, GROUP_COUNT):
+            new_group = {'domain_id': domain['id'],
+                         'name': uuid.uuid4().hex}
+            new_group = self.identity_api.create_group(new_group)
+            test_groups.append(new_group)
+
+            group_refs = self.identity_api.list_groups_for_user(
+                positive_user['id'])
+            self.assertEqual(x, len(group_refs))
+
+            self.identity_api.add_user_to_group(
+                positive_user['id'],
+                new_group['id'])
+            group_refs = self.identity_api.list_groups_for_user(
+                positive_user['id'])
+            self.assertEqual(x + 1, len(group_refs))
+
+            group_refs = self.identity_api.list_groups_for_user(
+                negative_user['id'])
+            self.assertEqual(0, len(group_refs))
+
+        self.config_fixture.config(group='ldap', group_filter='(dn=xx)')
+        self.reload_backends(CONF.identity.default_domain_id)
+        group_refs = self.identity_api.list_groups_for_user(
+            positive_user['id'])
+        self.assertEqual(0, len(group_refs))
+        group_refs = self.identity_api.list_groups_for_user(
+            negative_user['id'])
+        self.assertEqual(0, len(group_refs))
+
+        self.config_fixture.config(group='ldap',
+                                   group_filter='(objectclass=*)')
+        self.reload_backends(CONF.identity.default_domain_id)
+        group_refs = self.identity_api.list_groups_for_user(
+            positive_user['id'])
+        self.assertEqual(GROUP_COUNT, len(group_refs))
+        group_refs = self.identity_api.list_groups_for_user(
+            negative_user['id'])
+        self.assertEqual(0, len(group_refs))
+
+    def test_user_enable_attribute_mask(self):
+        self.config_fixture.config(
+            group='ldap',
+            user_enabled_emulation=False,
+            user_enabled_attribute='employeeType')
+        super(LiveLDAPIdentity, self).test_user_enable_attribute_mask()
+
+    def test_create_project_case_sensitivity(self):
+        # The attribute used for the live LDAP tests is case insensitive.
+
+        def call_super():
+            (super(LiveLDAPIdentity, self).
+                test_create_project_case_sensitivity())
+
+        self.assertRaises(exception.Conflict, call_super)
+
+    def test_create_user_case_sensitivity(self):
+        # The attribute used for the live LDAP tests is case insensitive.
+
+        def call_super():
+            super(LiveLDAPIdentity, self).test_create_user_case_sensitivity()
+
+        self.assertRaises(exception.Conflict, call_super)
+
+    def test_project_update_missing_attrs_with_a_falsey_value(self):
+        # The description attribute doesn't allow an empty value.
+
+        def call_super():
+            (super(LiveLDAPIdentity, self).
+                test_project_update_missing_attrs_with_a_falsey_value())
+
+        self.assertRaises(ldap.INVALID_SYNTAX, call_super)
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py
new file mode 100644 (file)
index 0000000..02fa814
--- /dev/null
@@ -0,0 +1,208 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import ldappool
+from oslo_config import cfg
+
+from keystone.common.ldap import core as ldap_core
+from keystone.identity.backends import ldap
+from keystone.tests import unit as tests
+from keystone.tests.unit import fakeldap
+from keystone.tests.unit import test_backend_ldap_pool
+from keystone.tests.unit import test_ldap_livetest
+
+
+CONF = cfg.CONF
+
+
+class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin,
+                           test_ldap_livetest.LiveLDAPIdentity):
+    """Executes existing LDAP live test with pooled LDAP handler to make
+    sure it works without any error.
+
+    Also executes common pool specific tests via Mixin class.
+    """
+
+    def setUp(self):
+        super(LiveLDAPPoolIdentity, self).setUp()
+        self.addCleanup(self.cleanup_pools)
+        # storing to local variable to avoid long references
+        self.conn_pools = ldap_core.PooledLDAPHandler.connection_pools
+
+    def config_files(self):
+        config_files = super(LiveLDAPPoolIdentity, self).config_files()
+        config_files.append(tests.dirs.
+                            tests_conf('backend_pool_liveldap.conf'))
+        return config_files
+
+    def config_overrides(self):
+        super(LiveLDAPPoolIdentity, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+
+    def test_assert_connector_used_not_fake_ldap_pool(self):
+        handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True)
+        self.assertNotEqual(type(handler.Connector),
+                            type(fakeldap.FakeLdapPool))
+        self.assertEqual(type(ldappool.StateConnector),
+                         type(handler.Connector))
+
+    def test_async_search_and_result3(self):
+        self.config_fixture.config(group='ldap', page_size=1)
+        self.test_user_enable_attribute_mask()
+
+    def test_pool_size_expands_correctly(self):
+
+        who = CONF.ldap.user
+        cred = CONF.ldap.password
+        # get related connection manager instance
+        ldappool_cm = self.conn_pools[CONF.ldap.url]
+
+        def _get_conn():
+            return ldappool_cm.connection(who, cred)
+
+        with _get_conn() as c1:  # conn1
+            self.assertEqual(1, len(ldappool_cm))
+            self.assertTrue(c1.connected)
+            self.assertTrue(c1.active)
+            with _get_conn() as c2:  # conn2
+                self.assertEqual(2, len(ldappool_cm))
+                self.assertTrue(c2.connected)
+                self.assertTrue(c2.active)
+
+            self.assertEqual(2, len(ldappool_cm))
+            # c2 went out of context; it's connected but not active
+            self.assertTrue(c2.connected)
+            self.assertFalse(c2.active)
+            with _get_conn() as c3:  # conn3
+                self.assertEqual(2, len(ldappool_cm))
+                self.assertTrue(c3.connected)
+                self.assertTrue(c3.active)
+                self.assertIs(c3, c2)  # same connection is reused
+                self.assertTrue(c2.active)
+                with _get_conn() as c4:  # conn4
+                    self.assertEqual(3, len(ldappool_cm))
+                    self.assertTrue(c4.connected)
+                    self.assertTrue(c4.active)
+
+    def test_password_change_with_auth_pool_disabled(self):
+        self.config_fixture.config(group='ldap', use_auth_pool=False)
+        old_password = self.user_sna['password']
+
+        self.test_password_change_with_pool()
+
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={},
+                          user_id=self.user_sna['id'],
+                          password=old_password)
+
+    def _create_user_and_authenticate(self, password):
+        user_dict = {
+            'domain_id': CONF.identity.default_domain_id,
+            'name': uuid.uuid4().hex,
+            'password': password}
+        user = self.identity_api.create_user(user_dict)
+
+        self.identity_api.authenticate(
+            context={},
+            user_id=user['id'],
+            password=password)
+
+        return self.identity_api.get_user(user['id'])
+
+    def _get_auth_conn_pool_cm(self):
+        pool_url = ldap_core.PooledLDAPHandler.auth_pool_prefix + CONF.ldap.url
+        return self.conn_pools[pool_url]
+
+    def _do_password_change_for_one_user(self, password, new_password):
+        self.config_fixture.config(group='ldap', use_auth_pool=True)
+        self.cleanup_pools()
+        self.load_backends()
+
+        user1 = self._create_user_and_authenticate(password)
+        auth_cm = self._get_auth_conn_pool_cm()
+        self.assertEqual(1, len(auth_cm))
+        user2 = self._create_user_and_authenticate(password)
+        self.assertEqual(1, len(auth_cm))
+        user3 = self._create_user_and_authenticate(password)
+        self.assertEqual(1, len(auth_cm))
+        user4 = self._create_user_and_authenticate(password)
+        self.assertEqual(1, len(auth_cm))
+        user5 = self._create_user_and_authenticate(password)
+        self.assertEqual(1, len(auth_cm))
+
+        # The connection pool size stays at 1 even though different users
+        # bind, because there is only one active connection at a time.
+
+        user_api = ldap.UserApi(CONF)
+        u1_dn = user_api._id_to_dn_string(user1['id'])
+        u2_dn = user_api._id_to_dn_string(user2['id'])
+        u3_dn = user_api._id_to_dn_string(user3['id'])
+        u4_dn = user_api._id_to_dn_string(user4['id'])
+        u5_dn = user_api._id_to_dn_string(user5['id'])
+
+        # Now create multiple active connections for the end-user auth
+        # case, which forces them all to be kept in the pool. After that,
+        # modify one user's password, making sure that user's connection
+        # sits in the middle of the pool list.
+        auth_cm = self._get_auth_conn_pool_cm()
+        with auth_cm.connection(u1_dn, password):
+            with auth_cm.connection(u2_dn, password):
+                with auth_cm.connection(u3_dn, password):
+                    with auth_cm.connection(u4_dn, password):
+                        with auth_cm.connection(u5_dn, password) as c5:
+                            self.assertEqual(5, len(auth_cm))
+                            # unbind the innermost (u5) connection
+                            c5.unbind_s()
+
+        user3['password'] = new_password
+        self.identity_api.update_user(user3['id'], user3)
+
+        return user3
+
+    def test_password_change_with_auth_pool_enabled_long_lifetime(self):
+        self.config_fixture.config(group='ldap',
+                                   auth_pool_connection_lifetime=600)
+        old_password = 'my_password'
+        new_password = 'new_password'
+        user = self._do_password_change_for_one_user(old_password,
+                                                     new_password)
+        user.pop('password')
+
+        # With a long connection lifetime, the auth pool can still bind
+        # with the old password, which is undesirable in deployments where
+        # password changes are frequent.
+        # This can only happen when there are multiple concurrent
+        # connections.
+        user_ref = self.identity_api.authenticate(
+            context={}, user_id=user['id'], password=old_password)
+
+        self.assertDictEqual(user_ref, user)
+
+    def test_password_change_with_auth_pool_enabled_no_lifetime(self):
+        self.config_fixture.config(group='ldap',
+                                   auth_pool_connection_lifetime=0)
+
+        old_password = 'my_password'
+        new_password = 'new_password'
+        user = self._do_password_change_for_one_user(old_password,
+                                                     new_password)
+        # Now that the connection lifetime is zero, authentication with
+        # the old password will always fail.
+        self.assertRaises(AssertionError,
+                          self.identity_api.authenticate,
+                          context={}, user_id=user['id'],
+                          password=old_password)
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py
new file mode 100644 (file)
index 0000000..d79c2ba
--- /dev/null
@@ -0,0 +1,122 @@
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ldap
+import ldap.modlist
+from oslo_config import cfg
+
+from keystone import exception
+from keystone import identity
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_ldap_livetest
+
+
+CONF = cfg.CONF
+
+
+def create_object(dn, attrs):
+    conn = ldap.initialize(CONF.ldap.url)
+    conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password)
+    ldif = ldap.modlist.addModlist(attrs)
+    conn.add_s(dn, ldif)
+    conn.unbind_s()
+
+
+class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity):
+
+    def _ldap_skip_live(self):
+        self.skip_if_env_not_set('ENABLE_TLS_LDAP_LIVE_TEST')
+
+    def config_files(self):
+        config_files = super(LiveTLSLDAPIdentity, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_tls_liveldap.conf'))
+        return config_files
+
+    def config_overrides(self):
+        super(LiveTLSLDAPIdentity, self).config_overrides()
+        self.config_fixture.config(
+            group='identity',
+            driver='keystone.identity.backends.ldap.Identity')
+
+    def test_tls_certfile_demand_option(self):
+        self.config_fixture.config(group='ldap',
+                                   use_tls=True,
+                                   tls_cacertdir=None,
+                                   tls_req_cert='demand')
+        self.identity_api = identity.backends.ldap.Identity()
+
+        user = {'name': 'fake1',
+                'password': 'fakepass1',
+                'tenants': ['bar']}
+        user = self.identity_api.create_user(user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(user['id'], user_ref['id'])
+
+        user['password'] = 'fakepass2'
+        self.identity_api.update_user(user['id'], user)
+
+        self.identity_api.delete_user(user['id'])
+        self.assertRaises(exception.UserNotFound, self.identity_api.get_user,
+                          user['id'])
+
+    def test_tls_certdir_demand_option(self):
+        self.config_fixture.config(group='ldap',
+                                   use_tls=True,
+                                   tls_cacertdir=None,
+                                   tls_req_cert='demand')
+        self.identity_api = identity.backends.ldap.Identity()
+
+        user = {'name': 'fake1',
+                'password': 'fakepass1',
+                'tenants': ['bar']}
+        user = self.identity_api.create_user(user)
+        user_ref = self.identity_api.get_user(user['id'])
+        self.assertEqual(user['id'], user_ref['id'])
+
+        user['password'] = 'fakepass2'
+        self.identity_api.update_user(user['id'], user)
+
+        self.identity_api.delete_user(user['id'])
+        self.assertRaises(exception.UserNotFound, self.identity_api.get_user,
+                          user['id'])
+
+    def test_tls_bad_certfile(self):
+        self.config_fixture.config(
+            group='ldap',
+            use_tls=True,
+            tls_req_cert='demand',
+            tls_cacertfile='/etc/keystone/ssl/certs/mythicalcert.pem',
+            tls_cacertdir=None)
+        self.identity_api = identity.backends.ldap.Identity()
+
+        user = {'name': 'fake1',
+                'password': 'fakepass1',
+                'tenants': ['bar']}
+        self.assertRaises(IOError, self.identity_api.create_user, user)
+
+    def test_tls_bad_certdir(self):
+        self.config_fixture.config(
+            group='ldap',
+            use_tls=True,
+            tls_cacertfile=None,
+            tls_req_cert='demand',
+            tls_cacertdir='/etc/keystone/ssl/mythicalcertdir')
+        self.identity_api = identity.backends.ldap.Identity()
+
+        user = {'name': 'fake1',
+                'password': 'fakepass1',
+                'tenants': ['bar']}
+        self.assertRaises(IOError, self.identity_api.create_user, user)
diff --git a/keystone-moon/keystone/tests/unit/test_middleware.py b/keystone-moon/keystone/tests/unit/test_middleware.py
new file mode 100644 (file)
index 0000000..3a26dd2
--- /dev/null
@@ -0,0 +1,119 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import webob
+
+from keystone import middleware
+from keystone.tests import unit as tests
+
+
+CONF = cfg.CONF
+
+
+def make_request(**kwargs):
+    accept = kwargs.pop('accept', None)
+    method = kwargs.pop('method', 'GET')
+    body = kwargs.pop('body', None)
+    req = webob.Request.blank('/', **kwargs)
+    req.method = method
+    if body is not None:
+        req.body = body
+    if accept is not None:
+        req.accept = accept
+    return req
+
+
+def make_response(**kwargs):
+    body = kwargs.pop('body', None)
+    return webob.Response(body)
+
+
+class TokenAuthMiddlewareTest(tests.TestCase):
+    def test_request(self):
+        req = make_request()
+        req.headers[middleware.AUTH_TOKEN_HEADER] = 'MAGIC'
+        middleware.TokenAuthMiddleware(None).process_request(req)
+        context = req.environ[middleware.CONTEXT_ENV]
+        self.assertEqual('MAGIC', context['token_id'])
+
+
+class AdminTokenAuthMiddlewareTest(tests.TestCase):
+    def test_request_admin(self):
+        req = make_request()
+        req.headers[middleware.AUTH_TOKEN_HEADER] = CONF.admin_token
+        middleware.AdminTokenAuthMiddleware(None).process_request(req)
+        context = req.environ[middleware.CONTEXT_ENV]
+        self.assertTrue(context['is_admin'])
+
+    def test_request_non_admin(self):
+        req = make_request()
+        req.headers[middleware.AUTH_TOKEN_HEADER] = 'NOT-ADMIN'
+        middleware.AdminTokenAuthMiddleware(None).process_request(req)
+        context = req.environ[middleware.CONTEXT_ENV]
+        self.assertFalse(context['is_admin'])
+
+
+class PostParamsMiddlewareTest(tests.TestCase):
+    def test_request_with_params(self):
+        req = make_request(body="arg1=one", method='POST')
+        middleware.PostParamsMiddleware(None).process_request(req)
+        params = req.environ[middleware.PARAMS_ENV]
+        self.assertEqual({"arg1": "one"}, params)
+
+
+class JsonBodyMiddlewareTest(tests.TestCase):
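+    # JsonBodyMiddleware parses a JSON object body into a dict stored in
+    # req.environ[PARAMS_ENV]; malformed or non-object bodies produce a
+    # 400 response, as the tests below exercise.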
+    def test_request_with_params(self):
+        req = make_request(body='{"arg1": "one", "arg2": ["a"]}',
+                           content_type='application/json',
+                           method='POST')
+        middleware.JsonBodyMiddleware(None).process_request(req)
+        params = req.environ[middleware.PARAMS_ENV]
+        self.assertEqual({"arg1": "one", "arg2": ["a"]}, params)
+
+    def test_malformed_json(self):
+        req = make_request(body='{"arg1": "on',
+                           content_type='application/json',
+                           method='POST')
+        resp = middleware.JsonBodyMiddleware(None).process_request(req)
+        self.assertEqual(400, resp.status_int)
+
+    def test_not_dict_body(self):
+        req = make_request(body='42',
+                           content_type='application/json',
+                           method='POST')
+        resp = middleware.JsonBodyMiddleware(None).process_request(req)
+        self.assertEqual(400, resp.status_int)
+        self.assertIn('valid JSON object', resp.json['error']['message'])
+
+    def test_no_content_type(self):
+        req = make_request(body='{"arg1": "one", "arg2": ["a"]}',
+                           method='POST')
+        middleware.JsonBodyMiddleware(None).process_request(req)
+        params = req.environ[middleware.PARAMS_ENV]
+        self.assertEqual({"arg1": "one", "arg2": ["a"]}, params)
+
+    def test_unrecognized_content_type(self):
+        req = make_request(body='{"arg1": "one", "arg2": ["a"]}',
+                           content_type='text/plain',
+                           method='POST')
+        resp = middleware.JsonBodyMiddleware(None).process_request(req)
+        self.assertEqual(400, resp.status_int)
+
+    def test_unrecognized_content_type_without_body(self):
+        req = make_request(content_type='text/plain',
+                           method='GET')
+        middleware.JsonBodyMiddleware(None).process_request(req)
+        params = req.environ.get(middleware.PARAMS_ENV, {})
+        self.assertEqual({}, params)
diff --git a/keystone-moon/keystone/tests/unit/test_no_admin_token_auth.py b/keystone-moon/keystone/tests/unit/test_no_admin_token_auth.py
new file mode 100644 (file)
index 0000000..9f67fbd
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+
+import webtest
+
+from keystone.tests import unit as tests
+
+
+class TestNoAdminTokenAuth(tests.TestCase):
+    def setUp(self):
+        super(TestNoAdminTokenAuth, self).setUp()
+        self.load_backends()
+
+        self._generate_paste_config()
+
+        self.admin_app = webtest.TestApp(
+            self.loadapp(tests.dirs.tmp('no_admin_token_auth'), name='admin'),
+            extra_environ=dict(REMOTE_ADDR='127.0.0.1'))
+        self.addCleanup(setattr, self, 'admin_app', None)
+
+    def _generate_paste_config(self):
+        # Generate a file, based on keystone-paste.ini, that doesn't include
+        # admin_token_auth in the pipeline
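+        # For example (illustrative pipeline only), a line such as
+        #     pipeline = ... token_auth admin_token_auth json_body ...
+        # becomes
+        #     pipeline = ... token_auth json_body ...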
+
+        with open(tests.dirs.etc('keystone-paste.ini'), 'r') as f:
+            contents = f.read()
+
+        new_contents = contents.replace(' admin_token_auth ', ' ')
+
+        filename = tests.dirs.tmp('no_admin_token_auth-paste.ini')
+        with open(filename, 'w') as f:
+            f.write(new_contents)
+        self.addCleanup(os.remove, filename)
+
+    def test_request_no_admin_token_auth(self):
+        # This test verifies that if the admin_token_auth middleware isn't
+        # in the paste pipeline that users can still make requests.
+
+        # Note(blk-u): Picked /v2.0/tenants because it's an operation that
+        # requires is_admin in the context; any operation that requires
+        # is_admin would work for this test.
+        REQ_PATH = '/v2.0/tenants'
+
+        # If the following does not raise, then the test is successful.
+        self.admin_app.get(REQ_PATH, headers={'X-Auth-Token': 'NotAdminToken'},
+                           status=401)
diff --git a/keystone-moon/keystone/tests/unit/test_policy.py b/keystone-moon/keystone/tests/unit/test_policy.py
new file mode 100644 (file)
index 0000000..2c0c399
--- /dev/null
@@ -0,0 +1,228 @@
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+import mock
+from oslo_policy import policy as common_policy
+import six
+from six.moves.urllib import request as urlrequest
+from testtools import matchers
+
+from keystone import exception
+from keystone.policy.backends import rules
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import temporaryfile
+
+
+class BasePolicyTestCase(tests.TestCase):
+    def setUp(self):
+        super(BasePolicyTestCase, self).setUp()
+        rules.reset()
+        self.addCleanup(rules.reset)
+        self.addCleanup(self.clear_cache_safely)
+
+    def clear_cache_safely(self):
+        if rules._ENFORCER:
+            rules._ENFORCER.clear()
+
+
+class PolicyFileTestCase(BasePolicyTestCase):
+    def setUp(self):
+        # self.tmpfilename should exist before super's setUp is called;
+        # this ensures it is available for the config_fixture in the
+        # config_overrides call.
+        self.tempfile = self.useFixture(temporaryfile.SecureTempFile())
+        self.tmpfilename = self.tempfile.file_name
+        super(PolicyFileTestCase, self).setUp()
+        self.target = {}
+
+    def config_overrides(self):
+        super(PolicyFileTestCase, self).config_overrides()
+        self.config_fixture.config(group='oslo_policy',
+                                   policy_file=self.tmpfilename)
+
+    def test_modified_policy_reloads(self):
+        action = "example:test"
+        empty_credentials = {}
+        with open(self.tmpfilename, "w") as policyfile:
+            policyfile.write("""{"example:test": []}""")
+        rules.enforce(empty_credentials, action, self.target)
+        with open(self.tmpfilename, "w") as policyfile:
+            policyfile.write("""{"example:test": ["false:false"]}""")
+        rules._ENFORCER.clear()
+        self.assertRaises(exception.ForbiddenAction, rules.enforce,
+                          empty_credentials, action, self.target)
+
+    def test_invalid_policy_raises_error(self):
+        action = "example:test"
+        empty_credentials = {}
+        invalid_json = '{"example:test": [],}'
+        with open(self.tmpfilename, "w") as policyfile:
+            policyfile.write(invalid_json)
+        self.assertRaises(ValueError, rules.enforce,
+                          empty_credentials, action, self.target)
+
+
+class PolicyTestCase(BasePolicyTestCase):
+    def setUp(self):
+        super(PolicyTestCase, self).setUp()
+        # NOTE(vish): preload rules to circumvent reloading from file
+        rules.init()
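+        # In this legacy list-of-lists syntax (a sketch of oslo.policy's
+        # behavior): the outer list is an OR of alternatives, each inner
+        # list is an AND of checks, and an empty list always passes.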
+        self.rules = {
+            "true": [],
+            "example:allowed": [],
+            "example:denied": [["false:false"]],
+            "example:get_http": [["http:http://www.example.com"]],
+            "example:my_file": [["role:compute_admin"],
+                                ["project_id:%(project_id)s"]],
+            "example:early_and_fail": [["false:false", "rule:true"]],
+            "example:early_or_success": [["rule:true"], ["false:false"]],
+            "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]],
+            "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]],
+        }
+
+        # NOTE(vish): then overload underlying policy engine
+        self._set_rules()
+        self.credentials = {}
+        self.target = {}
+
+    def _set_rules(self):
+        these_rules = common_policy.Rules.from_dict(self.rules)
+        rules._ENFORCER.set_rules(these_rules)
+
+    def test_enforce_nonexistent_action_throws(self):
+        action = "example:noexist"
+        self.assertRaises(exception.ForbiddenAction, rules.enforce,
+                          self.credentials, action, self.target)
+
+    def test_enforce_bad_action_throws(self):
+        action = "example:denied"
+        self.assertRaises(exception.ForbiddenAction, rules.enforce,
+                          self.credentials, action, self.target)
+
+    def test_enforce_good_action(self):
+        action = "example:allowed"
+        rules.enforce(self.credentials, action, self.target)
+
+    def test_enforce_http_true(self):
+
+        def fakeurlopen(url, post_data):
+            return six.StringIO("True")
+
+        action = "example:get_http"
+        target = {}
+        with mock.patch.object(urlrequest, 'urlopen', fakeurlopen):
+            result = rules.enforce(self.credentials, action, target)
+        self.assertTrue(result)
+
+    def test_enforce_http_false(self):
+
+        def fakeurlopen(url, post_data):
+            return six.StringIO("False")
+
+        action = "example:get_http"
+        target = {}
+        with mock.patch.object(urlrequest, 'urlopen', fakeurlopen):
+            self.assertRaises(exception.ForbiddenAction, rules.enforce,
+                              self.credentials, action, target)
+
+    def test_templatized_enforcement(self):
+        target_mine = {'project_id': 'fake'}
+        target_not_mine = {'project_id': 'another'}
+        credentials = {'project_id': 'fake', 'roles': []}
+        action = "example:my_file"
+        rules.enforce(credentials, action, target_mine)
+        self.assertRaises(exception.ForbiddenAction, rules.enforce,
+                          credentials, action, target_not_mine)
+
+    def test_early_AND_enforcement(self):
+        action = "example:early_and_fail"
+        self.assertRaises(exception.ForbiddenAction, rules.enforce,
+                          self.credentials, action, self.target)
+
+    def test_early_OR_enforcement(self):
+        action = "example:early_or_success"
+        rules.enforce(self.credentials, action, self.target)
+
+    def test_ignore_case_role_check(self):
+        lowercase_action = "example:lowercase_admin"
+        uppercase_action = "example:uppercase_admin"
+        # NOTE(dprince) we mix case in the Admin role here to ensure
+        # case is ignored
+        admin_credentials = {'roles': ['AdMiN']}
+        rules.enforce(admin_credentials, lowercase_action, self.target)
+        rules.enforce(admin_credentials, uppercase_action, self.target)
+
+
+class DefaultPolicyTestCase(BasePolicyTestCase):
+    def setUp(self):
+        super(DefaultPolicyTestCase, self).setUp()
+        rules.init()
+
+        self.rules = {
+            "default": [],
+            "example:exist": [["false:false"]]
+        }
+        self._set_rules('default')
+        self.credentials = {}
+
+        # FIXME(gyee): the latest oslo.policy Enforcer class reloads the
+        # rules in its enforce() method even though the rules have been
+        # initialized via set_rules(). To make our tests easier, we
+        # monkeypatch load_rules() so it does nothing. This seems like a
+        # bug in oslo.policy, as we shouldn't have to reload the rules if
+        # they have already been set using set_rules().
+        self._old_load_rules = rules._ENFORCER.load_rules
+        self.addCleanup(setattr, rules._ENFORCER, 'load_rules',
+                        self._old_load_rules)
+        rules._ENFORCER.load_rules = lambda *args, **kwargs: None
+
+    def _set_rules(self, default_rule):
+        these_rules = common_policy.Rules.from_dict(self.rules, default_rule)
+        rules._ENFORCER.set_rules(these_rules)
+
+    def test_policy_called(self):
+        self.assertRaises(exception.ForbiddenAction, rules.enforce,
+                          self.credentials, "example:exist", {})
+
+    def test_not_found_policy_calls_default(self):
+        rules.enforce(self.credentials, "example:noexist", {})
+
+    def test_default_not_found(self):
+        new_default_rule = "default_noexist"
+        # FIXME(gyee): we need to overwrite the Enforcer's default_rule
+        # first, as it recreates the rules with its own default_rule instead
+        # of the default_rule passed in from set_rules(). I think this is a
+        # bug in oslo.policy.
+        rules._ENFORCER.default_rule = new_default_rule
+        self._set_rules(new_default_rule)
+        self.assertRaises(exception.ForbiddenAction, rules.enforce,
+                          self.credentials, "example:noexist", {})
+
+
+class PolicyJsonTestCase(tests.TestCase):
+
+    def _load_entries(self, filename):
+        with open(filename) as f:
+            return set(json.load(f))
+
+    def test_json_examples_have_matching_entries(self):
+        policy_keys = self._load_entries(tests.dirs.etc('policy.json'))
+        cloud_policy_keys = self._load_entries(
+            tests.dirs.etc('policy.v3cloudsample.json'))
+
+        diffs = policy_keys.difference(cloud_policy_keys)
+
+        self.assertThat(diffs, matchers.Equals(set()))
diff --git a/keystone-moon/keystone/tests/unit/test_revoke.py b/keystone-moon/keystone/tests/unit/test_revoke.py
new file mode 100644 (file)
index 0000000..727eff7
--- /dev/null
@@ -0,0 +1,637 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import datetime
+import uuid
+
+import mock
+from oslo_utils import timeutils
+from testtools import matchers
+
+from keystone.contrib.revoke import model
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_backend_sql
+from keystone.token import provider
+
+
+def _new_id():
+    return uuid.uuid4().hex
+
+
+def _future_time():
+    expire_delta = datetime.timedelta(seconds=1000)
+    future_time = timeutils.utcnow() + expire_delta
+    return future_time
+
+
+def _past_time():
+    expire_delta = datetime.timedelta(days=-1000)
+    past_time = timeutils.utcnow() + expire_delta
+    return past_time
+
+
+def _sample_blank_token():
+    issued_delta = datetime.timedelta(minutes=-2)
+    issued_at = timeutils.utcnow() + issued_delta
+    token_data = model.blank_token_data(issued_at)
+    return token_data
+
+
+def _matches(event, token_values):
+    """See if the token matches the revocation event.
+
+    Used as a secondary, brute-force check on the tree-based logic
+    below.  Compare each attribute of the event with the corresponding
+    value from the token.  If the event does not have a value for
+    the attribute, a match is still possible.  If the event has a
+    value for the attribute and it does not match the token, no match
+    is possible, so skip the remaining checks.
+
+    :param event: one revocation event to match
+    :param token_values: dictionary of values taken from the token
+    :returns: True if the token matches the revocation event,
+        indicating the token has been revoked
+    """
+
+    # The token has three attributes that can match the user_id
+    if event.user_id is not None:
+        for attribute_name in ['user_id', 'trustor_id', 'trustee_id']:
+            if event.user_id == token_values[attribute_name]:
+                break
+        else:
+            return False
+
+    # The token has two attributes that can match the domain_id
+    if event.domain_id is not None:
+        for attribute_name in ['identity_domain_id', 'assignment_domain_id']:
+            if event.domain_id == token_values[attribute_name]:
+                break
+        else:
+            return False
+
+    if event.domain_scope_id is not None:
+        if event.domain_scope_id != token_values['assignment_domain_id']:
+            return False
+
+    # If any one check does not match, the whole token does not match
+    # the event. Each early "return False" means the token is still
+    # valid and short-circuits the rest of the logic.
+    attribute_names = ['project_id',
+                       'expires_at', 'trust_id', 'consumer_id',
+                       'access_token_id', 'audit_id', 'audit_chain_id']
+    for attribute_name in attribute_names:
+        if getattr(event, attribute_name) is not None:
+            if (getattr(event, attribute_name) !=
+                    token_values[attribute_name]):
+                return False
+
+    if event.role_id is not None:
+        roles = token_values['roles']
+        for role in roles:
+            if event.role_id == role:
+                break
+        else:
+            return False
+    if token_values['issued_at'] > event.issued_before:
+        return False
+    return True
+
+
+class RevokeTests(object):
+    def test_list(self):
+        self.revoke_api.revoke_by_user(user_id=1)
+        self.assertEqual(1, len(self.revoke_api.list_events()))
+
+        self.revoke_api.revoke_by_user(user_id=2)
+        self.assertEqual(2, len(self.revoke_api.list_events()))
+
+    def test_list_since(self):
+        self.revoke_api.revoke_by_user(user_id=1)
+        self.revoke_api.revoke_by_user(user_id=2)
+        past = timeutils.utcnow() - datetime.timedelta(seconds=1000)
+        self.assertEqual(2, len(self.revoke_api.list_events(past)))
+        future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
+        self.assertEqual(0, len(self.revoke_api.list_events(future)))
+
+    def test_past_expiry_are_removed(self):
+        user_id = 1
+        self.revoke_api.revoke_by_expiration(user_id, _future_time())
+        self.assertEqual(1, len(self.revoke_api.list_events()))
+        event = model.RevokeEvent()
+        event.revoked_at = _past_time()
+        self.revoke_api.revoke(event)
+        self.assertEqual(1, len(self.revoke_api.list_events()))
+
+    @mock.patch.object(timeutils, 'utcnow')
+    def test_expired_events_removed_validate_token_success(self, mock_utcnow):
+        def _sample_token_values():
+            token = _sample_blank_token()
+            token['expires_at'] = timeutils.isotime(_future_time(),
+                                                    subsecond=True)
+            return token
+
+        now = datetime.datetime.utcnow()
+        now_plus_2h = now + datetime.timedelta(hours=2)
+        mock_utcnow.return_value = now
+
+        # Build a token and validate it. This will seed the cache for the
+        # future 'synchronize' call.
+        token_values = _sample_token_values()
+
+        user_id = _new_id()
+        self.revoke_api.revoke_by_user(user_id)
+        token_values['user_id'] = user_id
+        self.assertRaises(exception.TokenNotFound,
+                          self.revoke_api.check_token,
+                          token_values)
+
+        # Move our clock forward by 2h, build a new token and validate it.
+        # 'synchronize' should now be exercised and remove old expired events
+        mock_utcnow.return_value = now_plus_2h
+        self.revoke_api.revoke_by_expiration(_new_id(), now_plus_2h)
+        # should no longer raise an exception
+        self.revoke_api.check_token(token_values)
+
+    def test_revoke_by_expiration_project_and_domain_fails(self):
+        user_id = _new_id()
+        expires_at = timeutils.isotime(_future_time(), subsecond=True)
+        domain_id = _new_id()
+        project_id = _new_id()
+        self.assertThat(
+            lambda: self.revoke_api.revoke_by_expiration(
+                user_id, expires_at, domain_id=domain_id,
+                project_id=project_id),
+            matchers.raises(exception.UnexpectedError))
+
+
+class SqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
+    def config_overrides(self):
+        super(SqlRevokeTests, self).config_overrides()
+        self.config_fixture.config(
+            group='revoke',
+            driver='keystone.contrib.revoke.backends.sql.Revoke')
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pki.Provider',
+            revoke_by_id=False)
+
+
+class KvsRevokeTests(tests.TestCase, RevokeTests):
+    def config_overrides(self):
+        super(KvsRevokeTests, self).config_overrides()
+        self.config_fixture.config(
+            group='revoke',
+            driver='keystone.contrib.revoke.backends.kvs.Revoke')
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pki.Provider',
+            revoke_by_id=False)
+
+    def setUp(self):
+        super(KvsRevokeTests, self).setUp()
+        self.load_backends()
+
+
+class RevokeTreeTests(tests.TestCase):
+    def setUp(self):
+        super(RevokeTreeTests, self).setUp()
+        self.events = []
+        self.tree = model.RevokeTree()
+        self._sample_data()
+
+    def _sample_data(self):
+        user_ids = []
+        project_ids = []
+        role_ids = []
+        for i in range(0, 3):
+            user_ids.append(_new_id())
+            project_ids.append(_new_id())
+            role_ids.append(_new_id())
+
+        project_tokens = []
+        i = len(project_tokens)
+        project_tokens.append(_sample_blank_token())
+        project_tokens[i]['user_id'] = user_ids[0]
+        project_tokens[i]['project_id'] = project_ids[0]
+        project_tokens[i]['roles'] = [role_ids[1]]
+
+        i = len(project_tokens)
+        project_tokens.append(_sample_blank_token())
+        project_tokens[i]['user_id'] = user_ids[1]
+        project_tokens[i]['project_id'] = project_ids[0]
+        project_tokens[i]['roles'] = [role_ids[0]]
+
+        i = len(project_tokens)
+        project_tokens.append(_sample_blank_token())
+        project_tokens[i]['user_id'] = user_ids[0]
+        project_tokens[i]['project_id'] = project_ids[1]
+        project_tokens[i]['roles'] = [role_ids[0]]
+
+        token_to_revoke = _sample_blank_token()
+        token_to_revoke['user_id'] = user_ids[0]
+        token_to_revoke['project_id'] = project_ids[0]
+        token_to_revoke['roles'] = [role_ids[0]]
+
+        self.project_tokens = project_tokens
+        self.user_ids = user_ids
+        self.project_ids = project_ids
+        self.role_ids = role_ids
+        self.token_to_revoke = token_to_revoke
+
+    def _assertTokenRevoked(self, token_data):
+        self.assertTrue(any([_matches(e, token_data) for e in self.events]))
+        return self.assertTrue(self.tree.is_revoked(token_data),
+                               'Token should be revoked')
+
+    def _assertTokenNotRevoked(self, token_data):
+        self.assertFalse(any([_matches(e, token_data) for e in self.events]))
+        return self.assertFalse(self.tree.is_revoked(token_data),
+                                'Token should not be revoked')
+
+    def _revoke_by_user(self, user_id):
+        return self.tree.add_event(
+            model.RevokeEvent(user_id=user_id))
+
+    def _revoke_by_audit_id(self, audit_id):
+        event = self.tree.add_event(
+            model.RevokeEvent(audit_id=audit_id))
+        self.events.append(event)
+        return event
+
+    def _revoke_by_audit_chain_id(self, audit_chain_id, project_id=None,
+                                  domain_id=None):
+        event = self.tree.add_event(
+            model.RevokeEvent(audit_chain_id=audit_chain_id,
+                              project_id=project_id,
+                              domain_id=domain_id)
+        )
+        self.events.append(event)
+        return event
+
+    def _revoke_by_expiration(self, user_id, expires_at, project_id=None,
+                              domain_id=None):
+        event = self.tree.add_event(
+            model.RevokeEvent(user_id=user_id,
+                              expires_at=expires_at,
+                              project_id=project_id,
+                              domain_id=domain_id))
+        self.events.append(event)
+        return event
+
+    def _revoke_by_grant(self, role_id, user_id=None,
+                         domain_id=None, project_id=None):
+        event = self.tree.add_event(
+            model.RevokeEvent(user_id=user_id,
+                              role_id=role_id,
+                              domain_id=domain_id,
+                              project_id=project_id))
+        self.events.append(event)
+        return event
+
+    def _revoke_by_user_and_project(self, user_id, project_id):
+        event = self.tree.add_event(
+            model.RevokeEvent(project_id=project_id,
+                              user_id=user_id))
+        self.events.append(event)
+        return event
+
+    def _revoke_by_project_role_assignment(self, project_id, role_id):
+        event = self.tree.add_event(
+            model.RevokeEvent(project_id=project_id,
+                              role_id=role_id))
+        self.events.append(event)
+        return event
+
+    def _revoke_by_domain_role_assignment(self, domain_id, role_id):
+        event = self.tree.add_event(
+            model.RevokeEvent(domain_id=domain_id,
+                              role_id=role_id))
+        self.events.append(event)
+        return event
+
+    def _revoke_by_domain(self, domain_id):
+        event = self.tree.add_event(model.RevokeEvent(domain_id=domain_id))
+        self.events.append(event)
+
+    def _user_field_test(self, field_name):
+        user_id = _new_id()
+        event = self._revoke_by_user(user_id)
+        self.events.append(event)
+        token_data_u1 = _sample_blank_token()
+        token_data_u1[field_name] = user_id
+        self._assertTokenRevoked(token_data_u1)
+        token_data_u2 = _sample_blank_token()
+        token_data_u2[field_name] = _new_id()
+        self._assertTokenNotRevoked(token_data_u2)
+        self.tree.remove_event(event)
+        self.events.remove(event)
+        self._assertTokenNotRevoked(token_data_u1)
+
+    def test_revoke_by_user(self):
+        self._user_field_test('user_id')
+
+    def test_revoke_by_user_matches_trustee(self):
+        self._user_field_test('trustee_id')
+
+    def test_revoke_by_user_matches_trustor(self):
+        self._user_field_test('trustor_id')
+
+    def test_by_user_expiration(self):
+        future_time = _future_time()
+
+        user_id = 1
+        event = self._revoke_by_expiration(user_id, future_time)
+        token_data_1 = _sample_blank_token()
+        token_data_1['user_id'] = user_id
+        token_data_1['expires_at'] = future_time.replace(microsecond=0)
+        self._assertTokenRevoked(token_data_1)
+
+        token_data_2 = _sample_blank_token()
+        token_data_2['user_id'] = user_id
+        expire_delta = datetime.timedelta(seconds=2000)
+        future_time = timeutils.utcnow() + expire_delta
+        token_data_2['expires_at'] = future_time
+        self._assertTokenNotRevoked(token_data_2)
+
+        self.remove_event(event)
+        self._assertTokenNotRevoked(token_data_1)
+
+    def test_revoke_by_audit_id(self):
+        audit_id = provider.audit_info(parent_audit_id=None)[0]
+        token_data_1 = _sample_blank_token()
+        # Audit ID and Audit Chain ID are populated with the same value
+        # if the token is an original token
+        token_data_1['audit_id'] = audit_id
+        token_data_1['audit_chain_id'] = audit_id
+        event = self._revoke_by_audit_id(audit_id)
+        self._assertTokenRevoked(token_data_1)
+
+        audit_id_2 = provider.audit_info(parent_audit_id=audit_id)[0]
+        token_data_2 = _sample_blank_token()
+        token_data_2['audit_id'] = audit_id_2
+        token_data_2['audit_chain_id'] = audit_id
+        self._assertTokenNotRevoked(token_data_2)
+
+        self.remove_event(event)
+        self._assertTokenNotRevoked(token_data_1)
+
+    def test_revoke_by_audit_chain_id(self):
+        audit_id = provider.audit_info(parent_audit_id=None)[0]
+        token_data_1 = _sample_blank_token()
+        # Audit ID and Audit Chain ID are populated with the same value
+        # if the token is an original token
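+        # Revoking by audit_chain_id therefore covers the original token
+        # and any token later derived from it (exercised with token_data_2
+        # below).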
+        token_data_1['audit_id'] = audit_id
+        token_data_1['audit_chain_id'] = audit_id
+        event = self._revoke_by_audit_chain_id(audit_id)
+        self._assertTokenRevoked(token_data_1)
+
+        audit_id_2 = provider.audit_info(parent_audit_id=audit_id)[0]
+        token_data_2 = _sample_blank_token()
+        token_data_2['audit_id'] = audit_id_2
+        token_data_2['audit_chain_id'] = audit_id
+        self._assertTokenRevoked(token_data_2)
+
+        self.remove_event(event)
+        self._assertTokenNotRevoked(token_data_1)
+        self._assertTokenNotRevoked(token_data_2)
+
+    def test_by_user_project(self):
+        # When a user has a project-scoped token and the project-scoped token
+        # is revoked then the token is revoked.
+
+        user_id = _new_id()
+        project_id = _new_id()
+
+        future_time = _future_time()
+
+        token_data = _sample_blank_token()
+        token_data['user_id'] = user_id
+        token_data['project_id'] = project_id
+        token_data['expires_at'] = future_time.replace(microsecond=0)
+
+        self._revoke_by_expiration(user_id, future_time, project_id=project_id)
+        self._assertTokenRevoked(token_data)
+
+    def test_by_user_domain(self):
+        # When a user has a domain-scoped token and the domain-scoped token
+        # is revoked then the token is revoked.
+
+        user_id = _new_id()
+        domain_id = _new_id()
+
+        future_time = _future_time()
+
+        token_data = _sample_blank_token()
+        token_data['user_id'] = user_id
+        token_data['assignment_domain_id'] = domain_id
+        token_data['expires_at'] = future_time.replace(microsecond=0)
+
+        self._revoke_by_expiration(user_id, future_time, domain_id=domain_id)
+        self._assertTokenRevoked(token_data)
+
+    def remove_event(self, event):
+        self.events.remove(event)
+        self.tree.remove_event(event)
+
+    def test_by_project_grant(self):
+        token_to_revoke = self.token_to_revoke
+        tokens = self.project_tokens
+
+        self._assertTokenNotRevoked(token_to_revoke)
+        for token in tokens:
+            self._assertTokenNotRevoked(token)
+
+        event = self._revoke_by_grant(role_id=self.role_ids[0],
+                                      user_id=self.user_ids[0],
+                                      project_id=self.project_ids[0])
+
+        self._assertTokenRevoked(token_to_revoke)
+        for token in tokens:
+            self._assertTokenNotRevoked(token)
+
+        self.remove_event(event)
+
+        self._assertTokenNotRevoked(token_to_revoke)
+        for token in tokens:
+            self._assertTokenNotRevoked(token)
+
+        token_to_revoke['roles'] = [self.role_ids[0],
+                                    self.role_ids[1],
+                                    self.role_ids[2]]
+
+        event = self._revoke_by_grant(role_id=self.role_ids[0],
+                                      user_id=self.user_ids[0],
+                                      project_id=self.project_ids[0])
+        self._assertTokenRevoked(token_to_revoke)
+        self.remove_event(event)
+        self._assertTokenNotRevoked(token_to_revoke)
+
+        event = self._revoke_by_grant(role_id=self.role_ids[1],
+                                      user_id=self.user_ids[0],
+                                      project_id=self.project_ids[0])
+        self._assertTokenRevoked(token_to_revoke)
+        self.remove_event(event)
+        self._assertTokenNotRevoked(token_to_revoke)
+
+        self._revoke_by_grant(role_id=self.role_ids[0],
+                              user_id=self.user_ids[0],
+                              project_id=self.project_ids[0])
+        self._revoke_by_grant(role_id=self.role_ids[1],
+                              user_id=self.user_ids[0],
+                              project_id=self.project_ids[0])
+        self._revoke_by_grant(role_id=self.role_ids[2],
+                              user_id=self.user_ids[0],
+                              project_id=self.project_ids[0])
+        self._assertTokenRevoked(token_to_revoke)
+
+    def test_by_project_and_user_and_role(self):
+        user_id1 = _new_id()
+        user_id2 = _new_id()
+        project_id = _new_id()
+        self.events.append(self._revoke_by_user(user_id1))
+        self.events.append(
+            self._revoke_by_user_and_project(user_id2, project_id))
+        token_data = _sample_blank_token()
+        token_data['user_id'] = user_id2
+        token_data['project_id'] = project_id
+        self._assertTokenRevoked(token_data)
+
+    def test_by_domain_user(self):
+        # If a domain is revoked, then a token for a user in that domain is
+        # revoked.
+
+        user_id = _new_id()
+        domain_id = _new_id()
+
+        token_data = _sample_blank_token()
+        token_data['user_id'] = user_id
+        token_data['identity_domain_id'] = domain_id
+
+        self._revoke_by_domain(domain_id)
+
+        self._assertTokenRevoked(token_data)
+
+    def test_by_domain_project(self):
+        # If a domain is revoked, then a token scoped to a project in that
+        # domain is revoked.
+
+        user_id = _new_id()
+        user_domain_id = _new_id()
+
+        project_id = _new_id()
+        project_domain_id = _new_id()
+
+        token_data = _sample_blank_token()
+        token_data['user_id'] = user_id
+        token_data['identity_domain_id'] = user_domain_id
+        token_data['project_id'] = project_id
+        token_data['assignment_domain_id'] = project_domain_id
+
+        self._revoke_by_domain(project_domain_id)
+
+        self._assertTokenRevoked(token_data)
+
+    def test_by_domain_domain(self):
+        # If a domain is revoked, then a token scoped to that domain is
+        # revoked.
+
+        user_id = _new_id()
+        user_domain_id = _new_id()
+
+        domain_id = _new_id()
+
+        token_data = _sample_blank_token()
+        token_data['user_id'] = user_id
+        token_data['identity_domain_id'] = user_domain_id
+        token_data['assignment_domain_id'] = domain_id
+
+        self._revoke_by_domain(domain_id)
+
+        self._assertTokenRevoked(token_data)
+
+    def _assertEmpty(self, collection):
+        return self.assertEqual(0, len(collection), "collection not empty")
+
+    def _assertEventsMatchIteration(self, turn):
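+        # revoke_map is a nested dict keyed by 'attribute=value' strings,
+        # roughly: map['trust_id=*']['consumer_id=*'] ... ['user_id=<id>'],
+        # where '*' means the event did not constrain that attribute.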
+        self.assertEqual(1, len(self.tree.revoke_map))
+        self.assertEqual(turn + 1, len(self.tree.revoke_map
+                                       ['trust_id=*']
+                                       ['consumer_id=*']
+                                       ['access_token_id=*']
+                                       ['audit_id=*']
+                                       ['audit_chain_id=*']))
+        # two different functions add domain_ids; +1 for the None entry
+        self.assertEqual(2 * turn + 1, len(self.tree.revoke_map
+                                           ['trust_id=*']
+                                           ['consumer_id=*']
+                                           ['access_token_id=*']
+                                           ['audit_id=*']
+                                           ['audit_chain_id=*']
+                                           ['expires_at=*']))
+        # two different functions add project_ids; +1 for the None entry
+        self.assertEqual(2 * turn + 1, len(self.tree.revoke_map
+                                           ['trust_id=*']
+                                           ['consumer_id=*']
+                                           ['access_token_id=*']
+                                           ['audit_id=*']
+                                           ['audit_chain_id=*']
+                                           ['expires_at=*']
+                                           ['domain_id=*']))
+        # one user_id is added per iteration (10 total after the loop)
+        self.assertEqual(turn, len(self.tree.revoke_map
+                                   ['trust_id=*']
+                                   ['consumer_id=*']
+                                   ['access_token_id=*']
+                                   ['audit_id=*']
+                                   ['audit_chain_id=*']
+                                   ['expires_at=*']
+                                   ['domain_id=*']
+                                   ['project_id=*']))
+
+    def test_cleanup(self):
+        events = self.events
+        self._assertEmpty(self.tree.revoke_map)
+        expiry_base_time = _future_time()
+        for i in range(0, 10):
+            events.append(
+                self._revoke_by_user(_new_id()))
+
+            args = (_new_id(),
+                    expiry_base_time + datetime.timedelta(seconds=i))
+            events.append(
+                self._revoke_by_expiration(*args))
+
+            self.assertEqual(i + 2, len(self.tree.revoke_map
+                                        ['trust_id=*']
+                                        ['consumer_id=*']
+                                        ['access_token_id=*']
+                                        ['audit_id=*']
+                                        ['audit_chain_id=*']),
+                             'adding %s to %s' % (args,
+                                                  self.tree.revoke_map))
+
+            events.append(
+                self._revoke_by_project_role_assignment(_new_id(), _new_id()))
+            events.append(
+                self._revoke_by_domain_role_assignment(_new_id(), _new_id()))
+            events.append(
+                self._revoke_by_domain_role_assignment(_new_id(), _new_id()))
+            events.append(
+                self._revoke_by_user_and_project(_new_id(), _new_id()))
+            self._assertEventsMatchIteration(i + 1)
+
+        for event in self.events:
+            self.tree.remove_event(event)
+        self._assertEmpty(self.tree.revoke_map)
diff --git a/keystone-moon/keystone/tests/unit/test_singular_plural.py b/keystone-moon/keystone/tests/unit/test_singular_plural.py
new file mode 100644 (file)
index 0000000..b07ea8d
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ast
+
+from keystone.contrib.admin_crud import core as admin_crud_core
+from keystone.contrib.s3 import core as s3_core
+from keystone.contrib.user_crud import core as user_crud_core
+from keystone.identity import core as identity_core
+from keystone import service
+
+
+class TestSingularPlural(object):
+    def test_keyword_arg_condition_or_methods(self):
+        """Raise if we see a keyword arg called 'condition' or 'methods'."""
+        modules = [admin_crud_core, s3_core,
+                   user_crud_core, identity_core, service]
+        for module in modules:
+            filename = module.__file__
+            if filename.endswith(".pyc"):
+                # In Python 2, the .py and .pyc files are in the same dir.
+                filename = filename[:-1]
+            with open(filename) as fil:
+                source = fil.read()
+            module = ast.parse(source, filename)
+            last_stmt_or_expr = None
+            for node in ast.walk(module):
+                if isinstance(node, (ast.stmt, ast.expr)):
+                    # keyword nodes don't have line numbers, so we need to
+                    # get that information from the parent stmt or expr.
+                    last_stmt_or_expr = node
+                elif isinstance(node, ast.keyword):
+                    for bad_word in ["condition", "methods"]:
+                        if node.arg == bad_word:
+                            raise AssertionError(
+                                "Suspicious name '%s' at %s line %s" %
+                                (bad_word, filename, last_stmt_or_expr.lineno))
diff --git a/keystone-moon/keystone/tests/unit/test_sql_livetest.py b/keystone-moon/keystone/tests/unit/test_sql_livetest.py
new file mode 100644 (file)
index 0000000..96ee6c7
--- /dev/null
@@ -0,0 +1,73 @@
+# Copyright 2013 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_sql_migrate_extensions
+from keystone.tests.unit import test_sql_upgrade
+
+
+class PostgresqlMigrateTests(test_sql_upgrade.SqlUpgradeTests):
+    def setUp(self):
+        self.skip_if_env_not_set('ENABLE_LIVE_POSTGRES_TEST')
+        super(PostgresqlMigrateTests, self).setUp()
+
+    def config_files(self):
+        files = super(PostgresqlMigrateTests, self).config_files()
+        files.append(tests.dirs.tests_conf("backend_postgresql.conf"))
+        return files
+
+
+class MysqlMigrateTests(test_sql_upgrade.SqlUpgradeTests):
+    def setUp(self):
+        self.skip_if_env_not_set('ENABLE_LIVE_MYSQL_TEST')
+        super(MysqlMigrateTests, self).setUp()
+
+    def config_files(self):
+        files = super(MysqlMigrateTests, self).config_files()
+        files.append(tests.dirs.tests_conf("backend_mysql.conf"))
+        return files
+
+
+class PostgresqlRevokeExtensionsTests(
+        test_sql_migrate_extensions.RevokeExtension):
+    def setUp(self):
+        self.skip_if_env_not_set('ENABLE_LIVE_POSTGRES_TEST')
+        super(PostgresqlRevokeExtensionsTests, self).setUp()
+
+    def config_files(self):
+        files = super(PostgresqlRevokeExtensionsTests, self).config_files()
+        files.append(tests.dirs.tests_conf("backend_postgresql.conf"))
+        return files
+
+
+class MysqlRevokeExtensionsTests(test_sql_migrate_extensions.RevokeExtension):
+    def setUp(self):
+        self.skip_if_env_not_set('ENABLE_LIVE_MYSQL_TEST')
+        super(MysqlRevokeExtensionsTests, self).setUp()
+
+    def config_files(self):
+        files = super(MysqlRevokeExtensionsTests, self).config_files()
+        files.append(tests.dirs.tests_conf("backend_mysql.conf"))
+        return files
+
+
+class Db2MigrateTests(test_sql_upgrade.SqlUpgradeTests):
+    def setUp(self):
+        self.skip_if_env_not_set('ENABLE_LIVE_DB2_TEST')
+        super(Db2MigrateTests, self).setUp()
+
+    def config_files(self):
+        files = super(Db2MigrateTests, self).config_files()
+        files.append(tests.dirs.tests_conf("backend_db2.conf"))
+        return files
diff --git a/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py b/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py
new file mode 100644 (file)
index 0000000..edfb91d
--- /dev/null
@@ -0,0 +1,380 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+To run these tests against a live database:
+
+1. Modify the file ``keystone/tests/unit/config_files/backend_sql.conf`` to use
+   the connection for your live database.
+2. Set up a blank, live database.
+3. Run the tests using::
+
+    tox -e py27 -- keystone.tests.unit.test_sql_migrate_extensions
+
+WARNING::
+
+   Your database will be wiped.
+
+   Do not do this against a database with valuable data as
+   all data will be lost.
+"""
+
+import uuid
+
+from oslo_db import exception as db_exception
+from oslo_db.sqlalchemy import utils
+import sqlalchemy
+
+from keystone.contrib import endpoint_filter
+from keystone.contrib import endpoint_policy
+from keystone.contrib import example
+from keystone.contrib import federation
+from keystone.contrib import oauth1
+from keystone.contrib import revoke
+from keystone.tests.unit import test_sql_upgrade
+
+
+class SqlUpgradeExampleExtension(test_sql_upgrade.SqlMigrateBase):
+    def repo_package(self):
+        return example
+
+    def test_upgrade(self):
+        self.assertTableDoesNotExist('example')
+        self.upgrade(1, repository=self.repo_path)
+        self.assertTableColumns('example', ['id', 'type', 'extra'])
+
+    def test_downgrade(self):
+        self.upgrade(1, repository=self.repo_path)
+        self.assertTableColumns('example', ['id', 'type', 'extra'])
+        self.downgrade(0, repository=self.repo_path)
+        self.assertTableDoesNotExist('example')
+
+
+class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
+    def repo_package(self):
+        return oauth1
+
+    def upgrade(self, version):
+        super(SqlUpgradeOAuth1Extension, self).upgrade(
+            version, repository=self.repo_path)
+
+    def downgrade(self, version):
+        super(SqlUpgradeOAuth1Extension, self).downgrade(
+            version, repository=self.repo_path)
+
+    def _assert_v1_3_tables(self):
+        self.assertTableColumns('consumer',
+                                ['id',
+                                 'description',
+                                 'secret',
+                                 'extra'])
+        self.assertTableColumns('request_token',
+                                ['id',
+                                 'request_secret',
+                                 'verifier',
+                                 'authorizing_user_id',
+                                 'requested_project_id',
+                                 'requested_roles',
+                                 'consumer_id',
+                                 'expires_at'])
+        self.assertTableColumns('access_token',
+                                ['id',
+                                 'access_secret',
+                                 'authorizing_user_id',
+                                 'project_id',
+                                 'requested_roles',
+                                 'consumer_id',
+                                 'expires_at'])
+
+    def _assert_v4_later_tables(self):
+        self.assertTableColumns('consumer',
+                                ['id',
+                                 'description',
+                                 'secret',
+                                 'extra'])
+        self.assertTableColumns('request_token',
+                                ['id',
+                                 'request_secret',
+                                 'verifier',
+                                 'authorizing_user_id',
+                                 'requested_project_id',
+                                 'role_ids',
+                                 'consumer_id',
+                                 'expires_at'])
+        self.assertTableColumns('access_token',
+                                ['id',
+                                 'access_secret',
+                                 'authorizing_user_id',
+                                 'project_id',
+                                 'role_ids',
+                                 'consumer_id',
+                                 'expires_at'])
+
+    def test_upgrade(self):
+        self.assertTableDoesNotExist('consumer')
+        self.assertTableDoesNotExist('request_token')
+        self.assertTableDoesNotExist('access_token')
+        self.upgrade(1)
+        self._assert_v1_3_tables()
+
+        # NOTE(blk-u): Migrations 2-3 don't modify the tables in a way that we
+        # can easily test for.
+
+        self.upgrade(4)
+        self._assert_v4_later_tables()
+
+        self.upgrade(5)
+        self._assert_v4_later_tables()
+
+    def test_downgrade(self):
+        self.upgrade(5)
+        self._assert_v4_later_tables()
+        self.downgrade(3)
+        self._assert_v1_3_tables()
+        self.downgrade(1)
+        self._assert_v1_3_tables()
+        self.downgrade(0)
+        self.assertTableDoesNotExist('consumer')
+        self.assertTableDoesNotExist('request_token')
+        self.assertTableDoesNotExist('access_token')
+
+
+class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
+    def repo_package(self):
+        return endpoint_filter
+
+    def upgrade(self, version):
+        super(EndpointFilterExtension, self).upgrade(
+            version, repository=self.repo_path)
+
+    def downgrade(self, version):
+        super(EndpointFilterExtension, self).downgrade(
+            version, repository=self.repo_path)
+
+    def _assert_v1_tables(self):
+        self.assertTableColumns('project_endpoint',
+                                ['endpoint_id', 'project_id'])
+        self.assertTableDoesNotExist('endpoint_group')
+        self.assertTableDoesNotExist('project_endpoint_group')
+
+    def _assert_v2_tables(self):
+        self.assertTableColumns('project_endpoint',
+                                ['endpoint_id', 'project_id'])
+        self.assertTableColumns('endpoint_group',
+                                ['id', 'name', 'description', 'filters'])
+        self.assertTableColumns('project_endpoint_group',
+                                ['endpoint_group_id', 'project_id'])
+
+    def test_upgrade(self):
+        self.assertTableDoesNotExist('project_endpoint')
+        self.upgrade(1)
+        self._assert_v1_tables()
+        self.assertTableColumns('project_endpoint',
+                                ['endpoint_id', 'project_id'])
+        self.upgrade(2)
+        self._assert_v2_tables()
+
+    def test_downgrade(self):
+        self.upgrade(2)
+        self._assert_v2_tables()
+        self.downgrade(1)
+        self._assert_v1_tables()
+        self.downgrade(0)
+        self.assertTableDoesNotExist('project_endpoint')
+
+
+class EndpointPolicyExtension(test_sql_upgrade.SqlMigrateBase):
+    def repo_package(self):
+        return endpoint_policy
+
+    def test_upgrade(self):
+        self.assertTableDoesNotExist('policy_association')
+        self.upgrade(1, repository=self.repo_path)
+        self.assertTableColumns('policy_association',
+                                ['id', 'policy_id', 'endpoint_id',
+                                 'service_id', 'region_id'])
+
+    def test_downgrade(self):
+        self.upgrade(1, repository=self.repo_path)
+        self.assertTableColumns('policy_association',
+                                ['id', 'policy_id', 'endpoint_id',
+                                 'service_id', 'region_id'])
+        self.downgrade(0, repository=self.repo_path)
+        self.assertTableDoesNotExist('policy_association')
+
+
+class FederationExtension(test_sql_upgrade.SqlMigrateBase):
+    """Test class for ensuring the Federation SQL."""
+
+    def setUp(self):
+        super(FederationExtension, self).setUp()
+        self.identity_provider = 'identity_provider'
+        self.federation_protocol = 'federation_protocol'
+        self.service_provider = 'service_provider'
+        self.mapping = 'mapping'
+
+    def repo_package(self):
+        return federation
+
+    def insert_dict(self, session, table_name, d):
+        """Naively inserts key-value pairs into a table, given a dictionary."""
+        table = sqlalchemy.Table(table_name, self.metadata, autoload=True)
+        insert = table.insert().values(**d)
+        session.execute(insert)
+        session.commit()
+
+    def test_upgrade(self):
+        self.assertTableDoesNotExist(self.identity_provider)
+        self.assertTableDoesNotExist(self.federation_protocol)
+        self.assertTableDoesNotExist(self.mapping)
+
+        self.upgrade(1, repository=self.repo_path)
+        self.assertTableColumns(self.identity_provider,
+                                ['id',
+                                 'enabled',
+                                 'description'])
+
+        self.assertTableColumns(self.federation_protocol,
+                                ['id',
+                                 'idp_id',
+                                 'mapping_id'])
+
+        self.upgrade(2, repository=self.repo_path)
+        self.assertTableColumns(self.mapping,
+                                ['id', 'rules'])
+
+        federation_protocol = utils.get_table(
+            self.engine,
+            'federation_protocol')
+        with self.engine.begin() as conn:
+            conn.execute(federation_protocol.insert(), id=0, idp_id=1)
+            self.upgrade(3, repository=self.repo_path)
+            federation_protocol = utils.get_table(
+                self.engine,
+                'federation_protocol')
+            self.assertFalse(federation_protocol.c.mapping_id.nullable)
+
+    def test_downgrade(self):
+        self.upgrade(3, repository=self.repo_path)
+        self.assertTableColumns(self.identity_provider,
+                                ['id', 'enabled', 'description'])
+        self.assertTableColumns(self.federation_protocol,
+                                ['id', 'idp_id', 'mapping_id'])
+        self.assertTableColumns(self.mapping,
+                                ['id', 'rules'])
+
+        self.downgrade(2, repository=self.repo_path)
+        federation_protocol = utils.get_table(
+            self.engine,
+            'federation_protocol')
+        self.assertTrue(federation_protocol.c.mapping_id.nullable)
+
+        self.downgrade(0, repository=self.repo_path)
+        self.assertTableDoesNotExist(self.identity_provider)
+        self.assertTableDoesNotExist(self.federation_protocol)
+        self.assertTableDoesNotExist(self.mapping)
+
+    def test_fixup_service_provider_attributes(self):
+        self.upgrade(6, repository=self.repo_path)
+        self.assertTableColumns(self.service_provider,
+                                ['id', 'description', 'enabled', 'auth_url',
+                                 'sp_url'])
+
+        session = self.Session()
+        sp1 = {'id': uuid.uuid4().hex,
+               'auth_url': None,
+               'sp_url': uuid.uuid4().hex,
+               'description': uuid.uuid4().hex,
+               'enabled': True}
+        sp2 = {'id': uuid.uuid4().hex,
+               'auth_url': uuid.uuid4().hex,
+               'sp_url': None,
+               'description': uuid.uuid4().hex,
+               'enabled': True}
+        sp3 = {'id': uuid.uuid4().hex,
+               'auth_url': None,
+               'sp_url': None,
+               'description': uuid.uuid4().hex,
+               'enabled': True}
+
+        # Inserting rows with 'auth_url' or 'sp_url' set to null must fail
+        self.assertRaises(db_exception.DBError,
+                          self.insert_dict,
+                          session,
+                          self.service_provider,
+                          sp1)
+        self.assertRaises(db_exception.DBError,
+                          self.insert_dict,
+                          session,
+                          self.service_provider,
+                          sp2)
+        self.assertRaises(db_exception.DBError,
+                          self.insert_dict,
+                          session,
+                          self.service_provider,
+                          sp3)
+
+        session.close()
+        self.downgrade(5, repository=self.repo_path)
+        self.assertTableColumns(self.service_provider,
+                                ['id', 'description', 'enabled', 'auth_url',
+                                 'sp_url'])
+        session = self.Session()
+        self.metadata.clear()
+
+        # Before the migration, the table should accept null values
+        self.insert_dict(session, self.service_provider, sp1)
+        self.insert_dict(session, self.service_provider, sp2)
+        self.insert_dict(session, self.service_provider, sp3)
+
+        # Check that null values are updated to empty strings when migrating
+        session.close()
+        self.upgrade(6, repository=self.repo_path)
+        sp_table = sqlalchemy.Table(self.service_provider,
+                                    self.metadata,
+                                    autoload=True)
+        session = self.Session()
+        self.metadata.clear()
+
+        sp = session.query(sp_table).filter(sp_table.c.id == sp1['id'])[0]
+        self.assertEqual('', sp.auth_url)
+
+        sp = session.query(sp_table).filter(sp_table.c.id == sp2['id'])[0]
+        self.assertEqual('', sp.sp_url)
+
+        sp = session.query(sp_table).filter(sp_table.c.id == sp3['id'])[0]
+        self.assertEqual('', sp.auth_url)
+        self.assertEqual('', sp.sp_url)
+
+_REVOKE_COLUMN_NAMES = ['id', 'domain_id', 'project_id', 'user_id', 'role_id',
+                        'trust_id', 'consumer_id', 'access_token_id',
+                        'issued_before', 'expires_at', 'revoked_at']
+
+
+class RevokeExtension(test_sql_upgrade.SqlMigrateBase):
+
+    def repo_package(self):
+        return revoke
+
+    def test_upgrade(self):
+        self.assertTableDoesNotExist('revocation_event')
+        self.upgrade(1, repository=self.repo_path)
+        self.assertTableColumns('revocation_event',
+                                _REVOKE_COLUMN_NAMES)
+
+    def test_downgrade(self):
+        self.upgrade(1, repository=self.repo_path)
+        self.assertTableColumns('revocation_event',
+                                _REVOKE_COLUMN_NAMES)
+        self.downgrade(0, repository=self.repo_path)
+        self.assertTableDoesNotExist('revocation_event')
diff --git a/keystone-moon/keystone/tests/unit/test_sql_upgrade.py b/keystone-moon/keystone/tests/unit/test_sql_upgrade.py
new file mode 100644 (file)
index 0000000..e50bad5
--- /dev/null
@@ -0,0 +1,957 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+To run these tests against a live database:
+
+1. Modify the file ``keystone/tests/unit/config_files/backend_sql.conf`` to use
+   the connection for your live database.
+2. Set up a blank, live database.
+3. Run the tests using::
+
+    tox -e py27 -- keystone.tests.unit.test_sql_upgrade
+
+WARNING::
+
+    Your database will be wiped.
+
+    Do not do this against a database with valuable data as
+    all data will be lost.
+"""
+
+import copy
+import json
+import uuid
+
+from migrate.versioning import api as versioning_api
+from oslo_config import cfg
+from oslo_db import exception as db_exception
+from oslo_db.sqlalchemy import migration
+from oslo_db.sqlalchemy import session as db_session
+import six
+from sqlalchemy.engine import reflection
+import sqlalchemy.exc
+from sqlalchemy import schema
+
+from keystone.common import sql
+from keystone.common.sql import migrate_repo
+from keystone.common.sql import migration_helpers
+from keystone.contrib import federation
+from keystone.contrib import revoke
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+
+
+CONF = cfg.CONF
+DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
+
+# NOTE(morganfainberg): This should be updated when each DB migration collapse
+# is done to mirror the expected structure of the DB in the format of
+# { <DB_TABLE_NAME>: [<COLUMN>, <COLUMN>, ...], ... }
+INITIAL_TABLE_STRUCTURE = {
+    'credential': [
+        'id', 'user_id', 'project_id', 'blob', 'type', 'extra',
+    ],
+    'domain': [
+        'id', 'name', 'enabled', 'extra',
+    ],
+    'endpoint': [
+        'id', 'legacy_endpoint_id', 'interface', 'region', 'service_id', 'url',
+        'enabled', 'extra',
+    ],
+    'group': [
+        'id', 'domain_id', 'name', 'description', 'extra',
+    ],
+    'policy': [
+        'id', 'type', 'blob', 'extra',
+    ],
+    'project': [
+        'id', 'name', 'extra', 'description', 'enabled', 'domain_id',
+    ],
+    'role': [
+        'id', 'name', 'extra',
+    ],
+    'service': [
+        'id', 'type', 'extra', 'enabled',
+    ],
+    'token': [
+        'id', 'expires', 'extra', 'valid', 'trust_id', 'user_id',
+    ],
+    'trust': [
+        'id', 'trustor_user_id', 'trustee_user_id', 'project_id',
+        'impersonation', 'deleted_at', 'expires_at', 'remaining_uses', 'extra',
+    ],
+    'trust_role': [
+        'trust_id', 'role_id',
+    ],
+    'user': [
+        'id', 'name', 'extra', 'password', 'enabled', 'domain_id',
+        'default_project_id',
+    ],
+    'user_group_membership': [
+        'user_id', 'group_id',
+    ],
+    'region': [
+        'id', 'description', 'parent_region_id', 'extra',
+    ],
+    'assignment': [
+        'type', 'actor_id', 'target_id', 'role_id', 'inherited',
+    ],
+}
+
+
+INITIAL_EXTENSION_TABLE_STRUCTURE = {
+    'revocation_event': [
+        'id', 'domain_id', 'project_id', 'user_id', 'role_id',
+        'trust_id', 'consumer_id', 'access_token_id',
+        'issued_before', 'expires_at', 'revoked_at', 'audit_id',
+        'audit_chain_id',
+    ],
+}
+
+EXTENSIONS = {'federation': federation,
+              'revoke': revoke}
+
+
+class SqlMigrateBase(tests.SQLDriverOverrides, tests.TestCase):
+    def initialize_sql(self):
+        self.metadata = sqlalchemy.MetaData()
+        self.metadata.bind = self.engine
+
+    def config_files(self):
+        config_files = super(SqlMigrateBase, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+        return config_files
+
+    def repo_package(self):
+        return sql
+
+    def setUp(self):
+        super(SqlMigrateBase, self).setUp()
+        database.initialize_sql_session()
+        conn_str = CONF.database.connection
+        if (conn_str != tests.IN_MEM_DB_CONN_STRING and
+                conn_str.startswith('sqlite') and
+                conn_str[10:] == tests.DEFAULT_TEST_DB_FILE):
+            # Override the default with a DB that is specific to the
+            # migration tests, but only if the DB connection string matches
+            # the global default. This avoids conflicts with the global
+            # default DB already being under migrate control, and is only
+            # needed when the DB is not in memory.
+            db_file = tests.dirs.tmp('keystone_migrate_test.db')
+            self.config_fixture.config(
+                group='database',
+                connection='sqlite:///%s' % db_file)
+
+        # create and share a single sqlalchemy engine for testing
+        self.engine = sql.get_engine()
+        self.Session = db_session.get_maker(self.engine, autocommit=False)
+
+        self.initialize_sql()
+        self.repo_path = migration_helpers.find_migrate_repo(
+            self.repo_package())
+        self.schema = versioning_api.ControlledSchema.create(
+            self.engine,
+            self.repo_path, self.initial_db_version)
+
+        # auto-detect the highest available schema version in the migrate_repo
+        self.max_version = self.schema.repository.version().version
+
+    def tearDown(self):
+        sqlalchemy.orm.session.Session.close_all()
+        meta = sqlalchemy.MetaData()
+        meta.bind = self.engine
+        meta.reflect(self.engine)
+
+        with self.engine.begin() as conn:
+            inspector = reflection.Inspector.from_engine(self.engine)
+            metadata = schema.MetaData()
+            tbs = []
+            all_fks = []
+
+            for table_name in inspector.get_table_names():
+                fks = []
+                for fk in inspector.get_foreign_keys(table_name):
+                    if not fk['name']:
+                        continue
+                    fks.append(
+                        schema.ForeignKeyConstraint((), (), name=fk['name']))
+                table = schema.Table(table_name, metadata, *fks)
+                tbs.append(table)
+                all_fks.extend(fks)
+
+            for fkc in all_fks:
+                conn.execute(schema.DropConstraint(fkc))
+
+            for table in tbs:
+                conn.execute(schema.DropTable(table))
+
+        sql.cleanup()
+        super(SqlMigrateBase, self).tearDown()
+
+    def select_table(self, name):
+        table = sqlalchemy.Table(name,
+                                 self.metadata,
+                                 autoload=True)
+        s = sqlalchemy.select([table])
+        return s
+
+    def assertTableExists(self, table_name):
+        try:
+            self.select_table(table_name)
+        except sqlalchemy.exc.NoSuchTableError:
+            raise AssertionError('Table "%s" does not exist' % table_name)
+
+    def assertTableDoesNotExist(self, table_name):
+        """Asserts that a given table exists cannot be selected by name."""
+        # Switch to a different metadata otherwise you might still
+        # detect renamed or dropped tables
+        try:
+            temp_metadata = sqlalchemy.MetaData()
+            temp_metadata.bind = self.engine
+            sqlalchemy.Table(table_name, temp_metadata, autoload=True)
+        except sqlalchemy.exc.NoSuchTableError:
+            pass
+        else:
+            raise AssertionError('Table "%s" already exists' % table_name)
+
+    def upgrade(self, *args, **kwargs):
+        self._migrate(*args, **kwargs)
+
+    def downgrade(self, *args, **kwargs):
+        self._migrate(*args, downgrade=True, **kwargs)
+
+    def _migrate(self, version, repository=None, downgrade=False,
+                 current_schema=None):
+        repository = repository or self.repo_path
+        err = ''
+        version = versioning_api._migrate_version(self.schema,
+                                                  version,
+                                                  not downgrade,
+                                                  err)
+        if not current_schema:
+            current_schema = self.schema
+        changeset = current_schema.changeset(version)
+        for ver, change in changeset:
+            self.schema.runchange(ver, change, changeset.step)
+        self.assertEqual(self.schema.version, version)
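+        # Tests drive these helpers as, e.g., ``self.upgrade(53)`` followed
+        # by ``self.downgrade(52)``; extension tests additionally pass
+        # ``repository=self.repo_path``.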
+
+    def assertTableColumns(self, table_name, expected_cols):
+        """Asserts that the table contains the expected set of columns."""
+        self.initialize_sql()
+        table = self.select_table(table_name)
+        actual_cols = [col.name for col in table.columns]
+        # Check if the columns are equal, but allow for a different order,
+        # which might occur after an upgrade followed by a downgrade
+        self.assertItemsEqual(expected_cols, actual_cols,
+                              '%s table' % table_name)
+
+    @property
+    def initial_db_version(self):
+        return getattr(self, '_initial_db_version', 0)
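+    # Subclasses opt in to a non-zero starting schema by setting the
+    # ``_initial_db_version`` class attribute (see SqlUpgradeTests below).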
+
+
+class SqlUpgradeTests(SqlMigrateBase):
+
+    _initial_db_version = migrate_repo.DB_INIT_VERSION
+
+    def test_blank_db_to_start(self):
+        self.assertTableDoesNotExist('user')
+
+    def test_start_version_db_init_version(self):
+        version = migration.db_version(sql.get_engine(), self.repo_path,
+                                       migrate_repo.DB_INIT_VERSION)
+        self.assertEqual(
+            migrate_repo.DB_INIT_VERSION,
+            version,
+            'DB is not at version %s' % migrate_repo.DB_INIT_VERSION)
+
+    def test_two_steps_forward_one_step_back(self):
+        """You should be able to cleanly undo and re-apply all upgrades.
+
+        Upgrades are run in the following order::
+
+            Starting with the initial version defined at
+            keystone.common.migrate_repo.DB_INIT_VERSION
+
+            INIT +1 -> INIT +2 -> INIT +1 -> INIT +2 -> INIT +3 -> INIT +2 ...
+            ^---------------------^          ^---------------------^
+
+        The test never downgrades all the way to DB_INIT_VERSION, because
+        the base version must be DB_INIT_VERSION + 1 before a migration
+        can run; downgrading below DB_INIT_VERSION + 1 is no longer
+        supported.
+
+        DB_INIT_VERSION is one less than the final schema version of the
+        release two cycles prior. For example, Juno's DB_INIT_VERSION is
+        35 because Havana (two releases before Juno) finished at schema
+        version 36.
+
+        The migrate utility requires that the DB be initialized under
+        version control at the revision directly before the first version
+        to be applied.
+
+        """
+        for x in range(migrate_repo.DB_INIT_VERSION + 1,
+                       self.max_version + 1):
+            self.upgrade(x)
+            downgrade_ver = x - 1
+            # Don't actually downgrade to the init version. This will raise
+            # a not-implemented error.
+            if downgrade_ver != migrate_repo.DB_INIT_VERSION:
+                self.downgrade(x - 1)
+            self.upgrade(x)
+
+    def test_upgrade_add_initial_tables(self):
+        self.upgrade(migrate_repo.DB_INIT_VERSION + 1)
+        self.check_initial_table_structure()
+
+    def check_initial_table_structure(self):
+        for table in INITIAL_TABLE_STRUCTURE:
+            self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
+
+        # Ensure the default domain was properly created.
+        default_domain = migration_helpers.get_default_domain()
+
+        meta = sqlalchemy.MetaData()
+        meta.bind = self.engine
+
+        domain_table = sqlalchemy.Table('domain', meta, autoload=True)
+
+        session = self.Session()
+        q = session.query(domain_table)
+        refs = q.all()
+
+        self.assertEqual(1, len(refs))
+        for k in default_domain.keys():
+            self.assertEqual(default_domain[k], getattr(refs[0], k))
+
+    def test_downgrade_to_db_init_version(self):
+        self.upgrade(self.max_version)
+
+        if self.engine.name == 'mysql':
+            self._mysql_check_all_tables_innodb()
+
+        self.downgrade(migrate_repo.DB_INIT_VERSION + 1)
+        self.check_initial_table_structure()
+
+        meta = sqlalchemy.MetaData()
+        meta.bind = self.engine
+        meta.reflect(self.engine)
+
+        initial_table_set = set(INITIAL_TABLE_STRUCTURE.keys())
+        table_set = set(meta.tables.keys())
+        # Explicitly remove the migrate_version table; it is not controlled
+        # by the migration scripts and should be exempt from this check.
+        table_set.remove('migrate_version')
+
+        self.assertSetEqual(initial_table_set, table_set)
+        # Downgrading below Icehouse's release schema version (044) is not
+        # supported; a NotImplementedError should be raised on the attempt.
+        self.assertRaises(NotImplementedError, self.downgrade,
+                          migrate_repo.DB_INIT_VERSION)
+
+    def insert_dict(self, session, table_name, d, table=None):
+        """Naively inserts key-value pairs into a table, given a dictionary."""
+        if table is None:
+            this_table = sqlalchemy.Table(table_name, self.metadata,
+                                          autoload=True)
+        else:
+            this_table = table
+        insert = this_table.insert().values(**d)
+        session.execute(insert)
+        session.commit()
+
+    def test_id_mapping(self):
+        self.upgrade(50)
+        self.assertTableDoesNotExist('id_mapping')
+        self.upgrade(51)
+        self.assertTableExists('id_mapping')
+        self.downgrade(50)
+        self.assertTableDoesNotExist('id_mapping')
+
+    def test_region_url_upgrade(self):
+        self.upgrade(52)
+        self.assertTableColumns('region',
+                                ['id', 'description', 'parent_region_id',
+                                 'extra', 'url'])
+
+    def test_region_url_downgrade(self):
+        self.upgrade(52)
+        self.downgrade(51)
+        self.assertTableColumns('region',
+                                ['id', 'description', 'parent_region_id',
+                                 'extra'])
+
+    def test_region_url_cleanup(self):
+        # make sure that the url field is dropped in the downgrade
+        self.upgrade(52)
+        session = self.Session()
+        beta = {
+            'id': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'parent_region_id': uuid.uuid4().hex,
+            'url': uuid.uuid4().hex
+        }
+        acme = {
+            'id': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'parent_region_id': uuid.uuid4().hex,
+            'url': None
+        }
+        self.insert_dict(session, 'region', beta)
+        self.insert_dict(session, 'region', acme)
+        region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
+        self.assertEqual(2, session.query(region_table).count())
+        session.close()
+        self.downgrade(51)
+        session = self.Session()
+        self.metadata.clear()
+        region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
+        self.assertEqual(2, session.query(region_table).count())
+        region = session.query(region_table)[0]
+        self.assertRaises(AttributeError, getattr, region, 'url')
+
+    def test_endpoint_region_upgrade_columns(self):
+        self.upgrade(53)
+        self.assertTableColumns('endpoint',
+                                ['id', 'legacy_endpoint_id', 'interface',
+                                 'service_id', 'url', 'extra', 'enabled',
+                                 'region_id'])
+        region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
+        self.assertEqual(255, region_table.c.id.type.length)
+        self.assertEqual(255, region_table.c.parent_region_id.type.length)
+        endpoint_table = sqlalchemy.Table('endpoint',
+                                          self.metadata,
+                                          autoload=True)
+        self.assertEqual(255, endpoint_table.c.region_id.type.length)
+
+    def test_endpoint_region_downgrade_columns(self):
+        self.upgrade(53)
+        self.downgrade(52)
+        self.assertTableColumns('endpoint',
+                                ['id', 'legacy_endpoint_id', 'interface',
+                                 'service_id', 'url', 'extra', 'enabled',
+                                 'region'])
+        region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
+        self.assertEqual(64, region_table.c.id.type.length)
+        self.assertEqual(64, region_table.c.parent_region_id.type.length)
+        endpoint_table = sqlalchemy.Table('endpoint',
+                                          self.metadata,
+                                          autoload=True)
+        self.assertEqual(255, endpoint_table.c.region.type.length)
+
+    def test_endpoint_region_migration(self):
+        self.upgrade(52)
+        session = self.Session()
+        _small_region_name = '0' * 30
+        _long_region_name = '0' * 255
+        _clashing_region_name = '0' * 70
+
+        def add_service():
+            service_id = uuid.uuid4().hex
+
+            service = {
+                'id': service_id,
+                'type': uuid.uuid4().hex
+            }
+
+            self.insert_dict(session, 'service', service)
+
+            return service_id
+
+        def add_endpoint(service_id, region):
+            endpoint_id = uuid.uuid4().hex
+
+            endpoint = {
+                'id': endpoint_id,
+                'interface': uuid.uuid4().hex[:8],
+                'service_id': service_id,
+                'url': uuid.uuid4().hex,
+                'region': region
+            }
+            self.insert_dict(session, 'endpoint', endpoint)
+
+            return endpoint_id
+
+        _service_id_ = add_service()
+        add_endpoint(_service_id_, region=_long_region_name)
+        add_endpoint(_service_id_, region=_long_region_name)
+        add_endpoint(_service_id_, region=_clashing_region_name)
+        add_endpoint(_service_id_, region=_small_region_name)
+        add_endpoint(_service_id_, region=None)
+
+        # upgrade to 53
+        session.close()
+        self.upgrade(53)
+        session = self.Session()
+        self.metadata.clear()
+
+        region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
+        self.assertEqual(1, session.query(region_table).
+                         filter_by(id=_long_region_name).count())
+        self.assertEqual(1, session.query(region_table).
+                         filter_by(id=_clashing_region_name).count())
+        self.assertEqual(1, session.query(region_table).
+                         filter_by(id=_small_region_name).count())
+
+        endpoint_table = sqlalchemy.Table('endpoint',
+                                          self.metadata,
+                                          autoload=True)
+        self.assertEqual(5, session.query(endpoint_table).count())
+        self.assertEqual(2, session.query(endpoint_table).
+                         filter_by(region_id=_long_region_name).count())
+        self.assertEqual(1, session.query(endpoint_table).
+                         filter_by(region_id=_clashing_region_name).count())
+        self.assertEqual(1, session.query(endpoint_table).
+                         filter_by(region_id=_small_region_name).count())
+
+        # downgrade to 52
+        session.close()
+        self.downgrade(52)
+        session = self.Session()
+        self.metadata.clear()
+
+        region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
+        self.assertEqual(1, session.query(region_table).count())
+        self.assertEqual(1, session.query(region_table).
+                         filter_by(id=_small_region_name).count())
+
+        endpoint_table = sqlalchemy.Table('endpoint',
+                                          self.metadata,
+                                          autoload=True)
+        self.assertEqual(5, session.query(endpoint_table).count())
+        self.assertEqual(2, session.query(endpoint_table).
+                         filter_by(region=_long_region_name).count())
+        self.assertEqual(1, session.query(endpoint_table).
+                         filter_by(region=_clashing_region_name).count())
+        self.assertEqual(1, session.query(endpoint_table).
+                         filter_by(region=_small_region_name).count())
+
+    def test_add_actor_id_index(self):
+        self.upgrade(53)
+        self.upgrade(54)
+        table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
+        index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
+        self.assertIn(('ix_actor_id', ['actor_id']), index_data)
+
+    def test_token_user_id_and_trust_id_index_upgrade(self):
+        self.upgrade(54)
+        self.upgrade(55)
+        table = sqlalchemy.Table('token', self.metadata, autoload=True)
+        index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
+        self.assertIn(('ix_token_user_id', ['user_id']), index_data)
+        self.assertIn(('ix_token_trust_id', ['trust_id']), index_data)
+
+    def test_token_user_id_and_trust_id_index_downgrade(self):
+        self.upgrade(55)
+        self.downgrade(54)
+        table = sqlalchemy.Table('token', self.metadata, autoload=True)
+        index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
+        self.assertNotIn(('ix_token_user_id', ['user_id']), index_data)
+        self.assertNotIn(('ix_token_trust_id', ['trust_id']), index_data)
+
+    def test_remove_actor_id_index(self):
+        self.upgrade(54)
+        self.downgrade(53)
+        table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
+        index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
+        self.assertNotIn(('ix_actor_id', ['actor_id']), index_data)
+
+    def test_project_parent_id_upgrade(self):
+        self.upgrade(61)
+        self.assertTableColumns('project',
+                                ['id', 'name', 'extra', 'description',
+                                 'enabled', 'domain_id', 'parent_id'])
+
+    def test_project_parent_id_downgrade(self):
+        self.upgrade(61)
+        self.downgrade(60)
+        self.assertTableColumns('project',
+                                ['id', 'name', 'extra', 'description',
+                                 'enabled', 'domain_id'])
+
+    def test_project_parent_id_cleanup(self):
+        # make sure that the parent_id field is dropped in the downgrade
+        self.upgrade(61)
+        session = self.Session()
+        domain = {'id': uuid.uuid4().hex,
+                  'name': uuid.uuid4().hex,
+                  'enabled': True}
+        acme = {
+            'id': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'domain_id': domain['id'],
+            'name': uuid.uuid4().hex,
+            'parent_id': None
+        }
+        beta = {
+            'id': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'domain_id': domain['id'],
+            'name': uuid.uuid4().hex,
+            'parent_id': acme['id']
+        }
+        self.insert_dict(session, 'domain', domain)
+        self.insert_dict(session, 'project', acme)
+        self.insert_dict(session, 'project', beta)
+        proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
+        self.assertEqual(2, session.query(proj_table).count())
+        session.close()
+        self.downgrade(60)
+        session = self.Session()
+        self.metadata.clear()
+        proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
+        self.assertEqual(2, session.query(proj_table).count())
+        project = session.query(proj_table)[0]
+        self.assertRaises(AttributeError, getattr, project, 'parent_id')
+
+    def test_drop_assignment_role_fk(self):
+        self.upgrade(61)
+        self.assertTrue(self.does_fk_exist('assignment', 'role_id'))
+        self.upgrade(62)
+        if self.engine.name != 'sqlite':
+            # sqlite does not support FK deletions (or enforcement)
+            self.assertFalse(self.does_fk_exist('assignment', 'role_id'))
+        self.downgrade(61)
+        self.assertTrue(self.does_fk_exist('assignment', 'role_id'))
+
+    def does_fk_exist(self, table, fk_column):
+        inspector = reflection.Inspector.from_engine(self.engine)
+        for fk in inspector.get_foreign_keys(table):
+            if fk_column in fk['constrained_columns']:
+                return True
+        return False
+
+    def test_drop_region_url_upgrade(self):
+        self.upgrade(63)
+        self.assertTableColumns('region',
+                                ['id', 'description', 'parent_region_id',
+                                 'extra'])
+
+    def test_drop_region_url_downgrade(self):
+        self.upgrade(63)
+        self.downgrade(62)
+        self.assertTableColumns('region',
+                                ['id', 'description', 'parent_region_id',
+                                 'extra', 'url'])
+
+    def test_drop_domain_fk(self):
+        self.upgrade(63)
+        self.assertTrue(self.does_fk_exist('group', 'domain_id'))
+        self.assertTrue(self.does_fk_exist('user', 'domain_id'))
+        self.upgrade(64)
+        if self.engine.name != 'sqlite':
+            # sqlite does not support FK deletions (or enforcement)
+            self.assertFalse(self.does_fk_exist('group', 'domain_id'))
+            self.assertFalse(self.does_fk_exist('user', 'domain_id'))
+        self.downgrade(63)
+        self.assertTrue(self.does_fk_exist('group', 'domain_id'))
+        self.assertTrue(self.does_fk_exist('user', 'domain_id'))
+
+    def test_add_domain_config(self):
+        whitelisted_table = 'whitelisted_config'
+        sensitive_table = 'sensitive_config'
+        self.upgrade(64)
+        self.assertTableDoesNotExist(whitelisted_table)
+        self.assertTableDoesNotExist(sensitive_table)
+        self.upgrade(65)
+        self.assertTableColumns(whitelisted_table,
+                                ['domain_id', 'group', 'option', 'value'])
+        self.assertTableColumns(sensitive_table,
+                                ['domain_id', 'group', 'option', 'value'])
+        self.downgrade(64)
+        self.assertTableDoesNotExist(whitelisted_table)
+        self.assertTableDoesNotExist(sensitive_table)
+
+    def test_fixup_service_name_value_upgrade(self):
+        """Update service name data from `extra` to empty string."""
+        def add_service(**extra_data):
+            service_id = uuid.uuid4().hex
+
+            service = {
+                'id': service_id,
+                'type': uuid.uuid4().hex,
+                'extra': json.dumps(extra_data),
+            }
+
+            self.insert_dict(session, 'service', service)
+
+            return service_id
+
+        self.upgrade(65)
+        session = self.Session()
+
+        # Services with extra values having a random attribute and
+        # different combinations of name
+        random_attr_name = uuid.uuid4().hex
+        random_attr_value = uuid.uuid4().hex
+        random_attr_str = "%s='%s'" % (random_attr_name, random_attr_value)
+        random_attr_no_name = {random_attr_name: random_attr_value}
+        random_attr_no_name_str = "%s='%s'" % (random_attr_name,
+                                               random_attr_value)
+        random_attr_name_value = {random_attr_name: random_attr_value,
+                                  'name': 'myname'}
+        random_attr_name_value_str = 'name=myname,%s' % random_attr_str
+        random_attr_name_empty = {random_attr_name: random_attr_value,
+                                  'name': ''}
+        random_attr_name_empty_str = 'name=,%s' % random_attr_str
+        random_attr_name_none = {random_attr_name: random_attr_value,
+                                 'name': None}
+        random_attr_name_none_str = 'name=None,%s' % random_attr_str
+
+        services = [
+            (add_service(**random_attr_no_name),
+             random_attr_name_empty, random_attr_no_name_str),
+            (add_service(**random_attr_name_value),
+             random_attr_name_value, random_attr_name_value_str),
+            (add_service(**random_attr_name_empty),
+             random_attr_name_empty, random_attr_name_empty_str),
+            (add_service(**random_attr_name_none),
+             random_attr_name_empty, random_attr_name_none_str),
+        ]
+
+        session.close()
+        self.upgrade(66)
+        session = self.Session()
+
+        # Verify that the services have the expected values.
+        self.metadata.clear()
+        service_table = sqlalchemy.Table('service', self.metadata,
+                                         autoload=True)
+
+        def fetch_service_extra(service_id):
+            cols = [service_table.c.extra]
+            f = service_table.c.id == service_id
+            s = sqlalchemy.select(cols).where(f)
+            service = session.execute(s).fetchone()
+            return json.loads(service.extra)
+
+        for service_id, exp_extra, msg in services:
+            extra = fetch_service_extra(service_id)
+            self.assertDictEqual(exp_extra, extra, msg)
+
+    def populate_user_table(self, with_pass_enab=False,
+                            with_pass_enab_domain=False):
+        # Populate the appropriate fields in the user
+        # table, depending on the parameters:
+        #
+        # Default: id, name, extra
+        # pass_enab: Add password, enabled as well
+        # pass_enab_domain: Add password, enabled and domain as well
+        #
+        this_table = sqlalchemy.Table("user",
+                                      self.metadata,
+                                      autoload=True)
+        for user in default_fixtures.USERS:
+            extra = copy.deepcopy(user)
+            extra.pop('id')
+            extra.pop('name')
+
+            if with_pass_enab:
+                password = extra.pop('password', None)
+                enabled = extra.pop('enabled', True)
+                ins = this_table.insert().values(
+                    {'id': user['id'],
+                     'name': user['name'],
+                     'password': password,
+                     'enabled': bool(enabled),
+                     'extra': json.dumps(extra)})
+            else:
+                if with_pass_enab_domain:
+                    password = extra.pop('password', None)
+                    enabled = extra.pop('enabled', True)
+                    extra.pop('domain_id')
+                    ins = this_table.insert().values(
+                        {'id': user['id'],
+                         'name': user['name'],
+                         'domain_id': user['domain_id'],
+                         'password': password,
+                         'enabled': bool(enabled),
+                         'extra': json.dumps(extra)})
+                else:
+                    ins = this_table.insert().values(
+                        {'id': user['id'],
+                         'name': user['name'],
+                         'extra': json.dumps(extra)})
+            self.engine.execute(ins)
+
+    def populate_tenant_table(self, with_desc_enab=False,
+                              with_desc_enab_domain=False):
+        # Populate the appropriate fields in the tenant or
+        # project table, depending on the parameters
+        #
+        # Default: id, name, extra
+        # desc_enab: Add description, enabled as well
+        # desc_enab_domain: Add description, enabled and domain as well,
+        #                   plus use project instead of tenant
+        #
+        if with_desc_enab_domain:
+            # By this time tenants are now projects
+            this_table = sqlalchemy.Table("project",
+                                          self.metadata,
+                                          autoload=True)
+        else:
+            this_table = sqlalchemy.Table("tenant",
+                                          self.metadata,
+                                          autoload=True)
+
+        for tenant in default_fixtures.TENANTS:
+            extra = copy.deepcopy(tenant)
+            extra.pop('id')
+            extra.pop('name')
+
+            if with_desc_enab:
+                desc = extra.pop('description', None)
+                enabled = extra.pop('enabled', True)
+                ins = this_table.insert().values(
+                    {'id': tenant['id'],
+                     'name': tenant['name'],
+                     'description': desc,
+                     'enabled': bool(enabled),
+                     'extra': json.dumps(extra)})
+            else:
+                if with_desc_enab_domain:
+                    desc = extra.pop('description', None)
+                    enabled = extra.pop('enabled', True)
+                    extra.pop('domain_id')
+                    ins = this_table.insert().values(
+                        {'id': tenant['id'],
+                         'name': tenant['name'],
+                         'domain_id': tenant['domain_id'],
+                         'description': desc,
+                         'enabled': bool(enabled),
+                         'extra': json.dumps(extra)})
+                else:
+                    ins = this_table.insert().values(
+                        {'id': tenant['id'],
+                         'name': tenant['name'],
+                         'extra': json.dumps(extra)})
+            self.engine.execute(ins)
+
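+    # MySQL-only sanity check: every table must use InnoDB, since MyISAM
+    # would silently drop the transactional and foreign-key guarantees the
+    # migrations rely on (migrate_version itself is exempt).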
+    def _mysql_check_all_tables_innodb(self):
+        # Use a distinct local name to avoid shadowing the module-level
+        # ``database`` fixture import.
+        db_name = self.engine.url.database
+
+        connection = self.engine.connect()
+        # sanity check
+        total = connection.execute("SELECT count(*) "
+                                   "from information_schema.TABLES "
+                                   "where TABLE_SCHEMA='%(database)s'" %
+                                   dict(database=db_name))
+        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
+
+        noninnodb = connection.execute("SELECT table_name "
+                                       "from information_schema.TABLES "
+                                       "where TABLE_SCHEMA='%(database)s' "
+                                       "and ENGINE!='InnoDB' "
+                                       "and TABLE_NAME!='migrate_version'" %
+                                       dict(database=db_name))
+        names = [x[0] for x in noninnodb]
+        self.assertEqual([], names,
+                         "Non-InnoDB tables exist")
+
+        connection.close()
+
+
+class VersionTests(SqlMigrateBase):
+
+    _initial_db_version = migrate_repo.DB_INIT_VERSION
+
+    def test_core_initial(self):
+        """Get the version before migrated, it's the initial DB version."""
+        version = migration_helpers.get_db_version()
+        self.assertEqual(migrate_repo.DB_INIT_VERSION, version)
+
+    def test_core_max(self):
+        """When get the version after upgrading, it's the new version."""
+        self.upgrade(self.max_version)
+        version = migration_helpers.get_db_version()
+        self.assertEqual(self.max_version, version)
+
+    def test_extension_not_controlled(self):
+        """When get the version before controlling, raises DbMigrationError."""
+        self.assertRaises(db_exception.DbMigrationError,
+                          migration_helpers.get_db_version,
+                          extension='federation')
+
+    def test_extension_initial(self):
+        """When get the initial version of an extension, it's 0."""
+        for name, extension in six.iteritems(EXTENSIONS):
+            abs_path = migration_helpers.find_migrate_repo(extension)
+            migration.db_version_control(sql.get_engine(), abs_path)
+            version = migration_helpers.get_db_version(extension=name)
+            self.assertEqual(0, version,
+                             'Migrate version for %s is not 0' % name)
+
+    def test_extension_migrated(self):
+        """When get the version after migrating an extension, it's not 0."""
+        for name, extension in six.iteritems(EXTENSIONS):
+            abs_path = migration_helpers.find_migrate_repo(extension)
+            migration.db_version_control(sql.get_engine(), abs_path)
+            migration.db_sync(sql.get_engine(), abs_path)
+            version = migration_helpers.get_db_version(extension=name)
+            self.assertTrue(
+                version > 0,
+                "Version for %s didn't change after migrated?" % name)
+
+    def test_extension_downgraded(self):
+        """When get the version after downgrading an extension, it is 0."""
+        for name, extension in six.iteritems(EXTENSIONS):
+            abs_path = migration_helpers.find_migrate_repo(extension)
+            migration.db_version_control(sql.get_engine(), abs_path)
+            migration.db_sync(sql.get_engine(), abs_path)
+            version = migration_helpers.get_db_version(extension=name)
+            self.assertTrue(
+                version > 0,
+                "Version for %s didn't change after migrated?" % name)
+            migration.db_sync(sql.get_engine(), abs_path, version=0)
+            version = migration_helpers.get_db_version(extension=name)
+            self.assertEqual(0, version,
+                             'Migrate version for %s is not 0' % name)
+
+    def test_unexpected_extension(self):
+        """The version for an extension that doesn't exist raises ImportError.
+
+        """
+
+        extension_name = uuid.uuid4().hex
+        self.assertRaises(ImportError,
+                          migration_helpers.get_db_version,
+                          extension=extension_name)
+
+    def test_unversioned_extension(self):
+        """The version for extensions without migrations raise an exception.
+
+        """
+
+        self.assertRaises(exception.MigrationNotProvided,
+                          migration_helpers.get_db_version,
+                          extension='admin_crud')
+
+    def test_initial_with_extension_version_None(self):
+        """When performing a default migration, also migrate extensions."""
+        migration_helpers.sync_database_to_version(extension=None,
+                                                   version=None)
+        for table in INITIAL_EXTENSION_TABLE_STRUCTURE:
+            self.assertTableColumns(table,
+                                    INITIAL_EXTENSION_TABLE_STRUCTURE[table])
+
+    def test_initial_with_extension_version_max(self):
+        """When migrating to max version, do not migrate extensions."""
+        migration_helpers.sync_database_to_version(extension=None,
+                                                   version=self.max_version)
+        for table in INITIAL_EXTENSION_TABLE_STRUCTURE:
+            self.assertTableDoesNotExist(table)
diff --git a/keystone-moon/keystone/tests/unit/test_ssl.py b/keystone-moon/keystone/tests/unit/test_ssl.py
new file mode 100644 (file)
index 0000000..c5f443b
--- /dev/null
@@ -0,0 +1,176 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import ssl
+
+from oslo_config import cfg
+
+from keystone.common import environment
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import appserver
+
+
+CONF = cfg.CONF
+
+CERTDIR = tests.dirs.root('examples', 'pki', 'certs')
+KEYDIR = tests.dirs.root('examples', 'pki', 'private')
+CERT = os.path.join(CERTDIR, 'ssl_cert.pem')
+KEY = os.path.join(KEYDIR, 'ssl_key.pem')
+CA = os.path.join(CERTDIR, 'cacert.pem')
+CLIENT = os.path.join(CERTDIR, 'middleware.pem')
+
+
+class SSLTestCase(tests.TestCase):
+    def setUp(self):
+        super(SSLTestCase, self).setUp()
+        # NOTE(jamespage):
+        # Deal with more secure certificate chain verification
+        # introduced in python 2.7.9 under PEP-0476
+        # https://github.com/python/peps/blob/master/pep-0476.txt
+        self.context = None
+        if hasattr(ssl, '_create_unverified_context'):
+            self.context = ssl._create_unverified_context()
+        self.load_backends()
+
+    def get_HTTPSConnection(self, *args):
+        """Simple helper to configure HTTPSConnection objects."""
+        if self.context:
+            return environment.httplib.HTTPSConnection(
+                *args,
+                context=self.context
+            )
+        else:
+            return environment.httplib.HTTPSConnection(*args)
+
+    def test_1way_ssl_ok(self):
+        """Make sure both public and admin API work with 1-way SSL."""
+        paste_conf = self._paste_config('keystone')
+        ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA)
+
+        # Verify Admin
+        with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '127.0.0.1', CONF.eventlet_server.admin_port)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
+
+        # Verify Public
+        with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '127.0.0.1', CONF.eventlet_server.public_port)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
+
+    def test_2way_ssl_ok(self):
+        """Make sure both public and admin API work with 2-way SSL.
+
+        Requires client certificate.
+        """
+        paste_conf = self._paste_config('keystone')
+        ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA, cert_required=True)
+
+        # Verify Admin
+        with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '127.0.0.1', CONF.eventlet_server.admin_port, CLIENT, CLIENT)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
+
+        # Verify Public
+        with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '127.0.0.1', CONF.eventlet_server.public_port, CLIENT, CLIENT)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
+
+    def test_1way_ssl_with_ipv6_ok(self):
+        """Make sure both public and admin API work with 1-way ipv6 & SSL."""
+        self.skip_if_no_ipv6()
+
+        paste_conf = self._paste_config('keystone')
+        ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA, host="::1")
+
+        # Verify Admin
+        with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '::1', CONF.eventlet_server.admin_port)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
+
+        # Verify Public
+        with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '::1', CONF.eventlet_server.public_port)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
+
+    def test_2way_ssl_with_ipv6_ok(self):
+        """Make sure both public and admin API work with 2-way ipv6 & SSL.
+
+        Requires client certificate.
+        """
+        self.skip_if_no_ipv6()
+
+        paste_conf = self._paste_config('keystone')
+        ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA,
+                          cert_required=True, host="::1")
+
+        # Verify Admin
+        with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '::1', CONF.eventlet_server.admin_port, CLIENT, CLIENT)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
+
+        # Verify Public
+        with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '::1', CONF.eventlet_server.public_port, CLIENT, CLIENT)
+            conn.request('GET', '/')
+            resp = conn.getresponse()
+            self.assertEqual(300, resp.status)
+
+    def test_2way_ssl_fail(self):
+        """Expect to fail when client does not present proper certificate."""
+        paste_conf = self._paste_config('keystone')
+        ssl_kwargs = dict(cert=CERT, key=KEY, ca=CA, cert_required=True)
+
+        # Verify Admin
+        with appserver.AppServer(paste_conf, appserver.ADMIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '127.0.0.1', CONF.eventlet_server.admin_port)
+            try:
+                conn.request('GET', '/')
+                self.fail('Admin API should have failed the SSL handshake!')
+            except ssl.SSLError:
+                pass
+
+        # Verify Public
+        with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
+            conn = self.get_HTTPSConnection(
+                '127.0.0.1', CONF.eventlet_server.public_port)
+            try:
+                conn.request('GET', '/')
+                self.fail('Public API should have failed the SSL handshake!')
+            except ssl.SSLError:
+                pass
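These tests deliberately opt out of the PEP 476 verification noted in setUp, since they drive throwaway example certificates. A client outside the test suite would verify the server instead; a minimal sketch under that assumption, reusing the CA path from this module (the port is hard-coded here for illustration, whereas the tests read it from CONF.eventlet_server.admin_port):

    import ssl

    from keystone.common import environment

    # Verify the server against the known CA rather than disabling checks.
    ctx = ssl.create_default_context(cafile=CA)
    conn = environment.httplib.HTTPSConnection('127.0.0.1', 35357, context=ctx)
    conn.request('GET', '/')
    assert conn.getresponse().status == 300  # version discovery at '/'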
diff --git a/keystone-moon/keystone/tests/unit/test_token_bind.py b/keystone-moon/keystone/tests/unit/test_token_bind.py
new file mode 100644 (file)
index 0000000..7dc7ccc
--- /dev/null
@@ -0,0 +1,198 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from keystone.common import wsgi
+from keystone import exception
+from keystone.models import token_model
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_token_provider
+
+
+KERBEROS_BIND = 'USER@REALM'
+ANY = 'any'
+
+
+class BindTest(tests.TestCase):
+    """Test binding tokens to a Principal.
+
+    Even though everything in this file references Kerberos, the same
+    concepts apply to any future binding mechanism.
+    """
+
+    def setUp(self):
+        super(BindTest, self).setUp()
+        self.TOKEN_BIND_KERB = copy.deepcopy(
+            test_token_provider.SAMPLE_V3_TOKEN)
+        self.TOKEN_BIND_KERB['token']['bind'] = {'kerberos': KERBEROS_BIND}
+        self.TOKEN_BIND_UNKNOWN = copy.deepcopy(
+            test_token_provider.SAMPLE_V3_TOKEN)
+        self.TOKEN_BIND_UNKNOWN['token']['bind'] = {'FOO': 'BAR'}
+        self.TOKEN_BIND_NONE = copy.deepcopy(
+            test_token_provider.SAMPLE_V3_TOKEN)
+
+        self.ALL_TOKENS = [self.TOKEN_BIND_KERB, self.TOKEN_BIND_UNKNOWN,
+                           self.TOKEN_BIND_NONE]
+
+    def assert_kerberos_bind(self, tokens, bind_level,
+                             use_kerberos=True, success=True):
+        if not isinstance(tokens, dict):
+            for token in tokens:
+                self.assert_kerberos_bind(token, bind_level,
+                                          use_kerberos=use_kerberos,
+                                          success=success)
+        elif use_kerberos == ANY:
+            for val in (True, False):
+                self.assert_kerberos_bind(tokens, bind_level,
+                                          use_kerberos=val, success=success)
+        else:
+            context = {'environment': {}}
+            self.config_fixture.config(group='token',
+                                       enforce_token_bind=bind_level)
+
+            if use_kerberos:
+                context['environment']['REMOTE_USER'] = KERBEROS_BIND
+                context['environment']['AUTH_TYPE'] = 'Negotiate'
+
+            # NOTE(morganfainberg): This assumes a V3 token.
+            token_ref = token_model.KeystoneToken(
+                token_id=uuid.uuid4().hex,
+                token_data=tokens)
+
+            if not success:
+                self.assertRaises(exception.Unauthorized,
+                                  wsgi.validate_token_bind,
+                                  context, token_ref)
+            else:
+                wsgi.validate_token_bind(context, token_ref)
+
+    # DISABLED
+
+    def test_bind_disabled_with_kerb_user(self):
+        self.assert_kerberos_bind(self.ALL_TOKENS,
+                                  bind_level='disabled',
+                                  use_kerberos=ANY,
+                                  success=True)
+
+    # PERMISSIVE
+
+    def test_bind_permissive_with_kerb_user(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
+                                  bind_level='permissive',
+                                  use_kerberos=True,
+                                  success=True)
+
+    def test_bind_permissive_with_regular_token(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_NONE,
+                                  bind_level='permissive',
+                                  use_kerberos=ANY,
+                                  success=True)
+
+    def test_bind_permissive_without_kerb_user(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
+                                  bind_level='permissive',
+                                  use_kerberos=False,
+                                  success=False)
+
+    def test_bind_permissive_with_unknown_bind(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN,
+                                  bind_level='permissive',
+                                  use_kerberos=ANY,
+                                  success=True)
+
+    # STRICT
+
+    def test_bind_strict_with_regular_token(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_NONE,
+                                  bind_level='strict',
+                                  use_kerberos=ANY,
+                                  success=True)
+
+    def test_bind_strict_with_kerb_user(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
+                                  bind_level='strict',
+                                  use_kerberos=True,
+                                  success=True)
+
+    def test_bind_strict_without_kerb_user(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
+                                  bind_level='strict',
+                                  use_kerberos=False,
+                                  success=False)
+
+    def test_bind_strict_with_unknown_bind(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN,
+                                  bind_level='strict',
+                                  use_kerberos=ANY,
+                                  success=False)
+
+    # REQUIRED
+
+    def test_bind_required_with_regular_token(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_NONE,
+                                  bind_level='required',
+                                  use_kerberos=ANY,
+                                  success=False)
+
+    def test_bind_required_with_kerb_user(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
+                                  bind_level='required',
+                                  use_kerberos=True,
+                                  success=True)
+
+    def test_bind_required_without_kerb_user(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
+                                  bind_level='required',
+                                  use_kerberos=False,
+                                  success=False)
+
+    def test_bind_required_with_unknown_bind(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN,
+                                  bind_level='required',
+                                  use_kerberos=ANY,
+                                  success=False)
+
+    # NAMED
+
+    def test_bind_named_with_regular_token(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_NONE,
+                                  bind_level='kerberos',
+                                  use_kerberos=ANY,
+                                  success=False)
+
+    def test_bind_named_with_kerb_user(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
+                                  bind_level='kerberos',
+                                  use_kerberos=True,
+                                  success=True)
+
+    def test_bind_named_without_kerb_user(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
+                                  bind_level='kerberos',
+                                  use_kerberos=False,
+                                  success=False)
+
+    def test_bind_named_with_unknown_bind(self):
+        self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN,
+                                  bind_level='kerberos',
+                                  use_kerberos=ANY,
+                                  success=False)
+
+    def test_bind_named_with_unknown_scheme(self):
+        self.assert_kerberos_bind(self.ALL_TOKENS,
+                                  bind_level='unknown',
+                                  use_kerberos=ANY,
+                                  success=False)
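The bind_level values exercised above map one-to-one onto keystone's [token] enforce_token_bind option; the test matrix pins down the following semantics. A keystone.conf sketch summarizing them:

    [token]
    # disabled   -- never check token binds
    # permissive -- check binds the server understands; ignore unknown forms
    # strict     -- check binds the server understands; reject unknown forms
    # required   -- reject any token that carries no bind at all
    # kerberos   -- (a named scheme) require that specific bind on every token
    enforce_token_bind = permissive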
diff --git a/keystone-moon/keystone/tests/unit/test_token_provider.py b/keystone-moon/keystone/tests/unit/test_token_provider.py
new file mode 100644 (file)
index 0000000..dc08664
--- /dev/null
@@ -0,0 +1,836 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo_config import cfg
+from oslo_utils import timeutils
+
+from keystone.common import dependency
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import database
+from keystone import token
+from keystone.token.providers import pki
+from keystone.token.providers import uuid
+
+
+CONF = cfg.CONF
+
+FUTURE_DELTA = datetime.timedelta(seconds=CONF.token.expiration)
+CURRENT_DATE = timeutils.utcnow()
+
+SAMPLE_V2_TOKEN = {
+    "access": {
+        "trust": {
+            "id": "abc123",
+            "trustee_user_id": "123456",
+            "trustor_user_id": "333333",
+            "impersonation": False
+        },
+        "serviceCatalog": [
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:8774/v1.1/01257",
+                        "id": "51934fe63a5b4ac0a32664f64eb462c3",
+                        "internalURL": "http://localhost:8774/v1.1/01257",
+                        "publicURL": "http://localhost:8774/v1.1/01257",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "nova",
+                "type": "compute"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:9292",
+                        "id": "aaa17a539e364297a7845d67c7c7cc4b",
+                        "internalURL": "http://localhost:9292",
+                        "publicURL": "http://localhost:9292",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "glance",
+                "type": "image"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:8776/v1/01257",
+                        "id": "077d82df25304abeac2294004441db5a",
+                        "internalURL": "http://localhost:8776/v1/01257",
+                        "publicURL": "http://localhost:8776/v1/01257",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "volume",
+                "type": "volume"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:8773/services/Admin",
+                        "id": "b06997fd08414903ad458836efaa9067",
+                        "internalURL": "http://localhost:8773/services/Cloud",
+                        "publicURL": "http://localhost:8773/services/Cloud",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "ec2",
+                "type": "ec2"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:8080/v1",
+                        "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
+                        "internalURL": "http://localhost:8080/v1/AUTH_01257",
+                        "publicURL": "http://localhost:8080/v1/AUTH_01257",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "swift",
+                "type": "object-store"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:35357/v2.0",
+                        "id": "02850c5d1d094887bdc46e81e1e15dc7",
+                        "internalURL": "http://localhost:5000/v2.0",
+                        "publicURL": "http://localhost:5000/v2.0",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "keystone",
+                "type": "identity"
+            }
+        ],
+        "token": {
+            "expires": "2013-05-22T00:02:43.941430Z",
+            "id": "ce4fc2d36eea4cc9a36e666ac2f1029a",
+            "issued_at": "2013-05-21T00:02:43.941473Z",
+            "tenant": {
+                "enabled": True,
+                "id": "01257",
+                "name": "service"
+            }
+        },
+        "user": {
+            "id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
+            "name": "nova",
+            "roles": [
+                {
+                    "name": "_member_"
+                },
+                {
+                    "name": "admin"
+                }
+            ],
+            "roles_links": [],
+            "username": "nova"
+        }
+    }
+}
+
+SAMPLE_V3_TOKEN = {
+    "token": {
+        "catalog": [
+            {
+                "endpoints": [
+                    {
+                        "id": "02850c5d1d094887bdc46e81e1e15dc7",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:35357/v2.0"
+                    },
+                    {
+                        "id": "446e244b75034a9ab4b0811e82d0b7c8",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:5000/v2.0"
+                    },
+                    {
+                        "id": "47fa3d9f499240abb5dfcf2668f168cd",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:5000/v2.0"
+                    }
+                ],
+                "id": "26d7541715a44a4d9adad96f9872b633",
+                "type": "identity",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "aaa17a539e364297a7845d67c7c7cc4b",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:9292"
+                    },
+                    {
+                        "id": "4fa9620e42394cb1974736dce0856c71",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:9292"
+                    },
+                    {
+                        "id": "9673687f9bc441d88dec37942bfd603b",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:9292"
+                    }
+                ],
+                "id": "d27a41843f4e4b0e8cf6dac4082deb0d",
+                "type": "image",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8080/v1"
+                    },
+                    {
+                        "id": "43bef154594d4ccb8e49014d20624e1d",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8080/v1/AUTH_01257"
+                    },
+                    {
+                        "id": "e63b5f5d7aa3493690189d0ff843b9b3",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8080/v1/AUTH_01257"
+                    }
+                ],
+                "id": "a669e152f1104810a4b6701aade721bb",
+                "type": "object-store",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "51934fe63a5b4ac0a32664f64eb462c3",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8774/v1.1/01257"
+                    },
+                    {
+                        "id": "869b535eea0d42e483ae9da0d868ebad",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8774/v1.1/01257"
+                    },
+                    {
+                        "id": "93583824c18f4263a2245ca432b132a6",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8774/v1.1/01257"
+                    }
+                ],
+                "id": "7f32cc2af6c9476e82d75f80e8b3bbb8",
+                "type": "compute",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "b06997fd08414903ad458836efaa9067",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8773/services/Admin"
+                    },
+                    {
+                        "id": "411f7de7c9a8484c9b46c254fb2676e2",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8773/services/Cloud"
+                    },
+                    {
+                        "id": "f21c93f3da014785854b4126d0109c49",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8773/services/Cloud"
+                    }
+                ],
+                "id": "b08c9c7d4ef543eba5eeb766f72e5aa1",
+                "type": "ec2",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "077d82df25304abeac2294004441db5a",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8776/v1/01257"
+                    },
+                    {
+                        "id": "875bf282362c40219665278b4fd11467",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8776/v1/01257"
+                    },
+                    {
+                        "id": "cd229aa6df0640dc858a8026eb7e640c",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8776/v1/01257"
+                    }
+                ],
+                "id": "5db21b82617f4a95816064736a7bec22",
+                "type": "volume",
+            }
+        ],
+        "expires_at": "2013-05-22T00:02:43.941430Z",
+        "issued_at": "2013-05-21T00:02:43.941473Z",
+        "methods": [
+            "password"
+        ],
+        "project": {
+            "domain": {
+                "id": "default",
+                "name": "Default"
+            },
+            "id": "01257",
+            "name": "service"
+        },
+        "roles": [
+            {
+                "id": "9fe2ff9ee4384b1894a90878d3e92bab",
+                "name": "_member_"
+            },
+            {
+                "id": "53bff13443bd4450b97f978881d47b18",
+                "name": "admin"
+            }
+        ],
+        "user": {
+            "domain": {
+                "id": "default",
+                "name": "Default"
+            },
+            "id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
+            "name": "nova"
+        },
+        "OS-TRUST:trust": {
+            "id": "abc123",
+            "trustee_user_id": "123456",
+            "trustor_user_id": "333333",
+            "impersonation": False
+        }
+    }
+}
+
+SAMPLE_V2_TOKEN_WITH_EMBEDDED_VERSION = {
+    "access": {
+        "trust": {
+            "id": "abc123",
+            "trustee_user_id": "123456",
+            "trustor_user_id": "333333",
+            "impersonation": False
+        },
+        "serviceCatalog": [
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:8774/v1.1/01257",
+                        "id": "51934fe63a5b4ac0a32664f64eb462c3",
+                        "internalURL": "http://localhost:8774/v1.1/01257",
+                        "publicURL": "http://localhost:8774/v1.1/01257",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "nova",
+                "type": "compute"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:9292",
+                        "id": "aaa17a539e364297a7845d67c7c7cc4b",
+                        "internalURL": "http://localhost:9292",
+                        "publicURL": "http://localhost:9292",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "glance",
+                "type": "image"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:8776/v1/01257",
+                        "id": "077d82df25304abeac2294004441db5a",
+                        "internalURL": "http://localhost:8776/v1/01257",
+                        "publicURL": "http://localhost:8776/v1/01257",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "volume",
+                "type": "volume"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:8773/services/Admin",
+                        "id": "b06997fd08414903ad458836efaa9067",
+                        "internalURL": "http://localhost:8773/services/Cloud",
+                        "publicURL": "http://localhost:8773/services/Cloud",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "ec2",
+                "type": "ec2"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:8080/v1",
+                        "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
+                        "internalURL": "http://localhost:8080/v1/AUTH_01257",
+                        "publicURL": "http://localhost:8080/v1/AUTH_01257",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "swift",
+                "type": "object-store"
+            },
+            {
+                "endpoints": [
+                    {
+                        "adminURL": "http://localhost:35357/v2.0",
+                        "id": "02850c5d1d094887bdc46e81e1e15dc7",
+                        "internalURL": "http://localhost:5000/v2.0",
+                        "publicURL": "http://localhost:5000/v2.0",
+                        "region": "RegionOne"
+                    }
+                ],
+                "endpoints_links": [],
+                "name": "keystone",
+                "type": "identity"
+            }
+        ],
+        "token": {
+            "expires": "2013-05-22T00:02:43.941430Z",
+            "id": "ce4fc2d36eea4cc9a36e666ac2f1029a",
+            "issued_at": "2013-05-21T00:02:43.941473Z",
+            "tenant": {
+                "enabled": True,
+                "id": "01257",
+                "name": "service"
+            }
+        },
+        "user": {
+            "id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
+            "name": "nova",
+            "roles": [
+                {
+                    "name": "_member_"
+                },
+                {
+                    "name": "admin"
+                }
+            ],
+            "roles_links": [],
+            "username": "nova"
+        }
+    },
+    'token_version': 'v2.0'
+}
+SAMPLE_V3_TOKEN_WITH_EMBEDDED_VERSION = {
+    "token": {
+        "catalog": [
+            {
+                "endpoints": [
+                    {
+                        "id": "02850c5d1d094887bdc46e81e1e15dc7",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:35357/v2.0"
+                    },
+                    {
+                        "id": "446e244b75034a9ab4b0811e82d0b7c8",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:5000/v2.0"
+                    },
+                    {
+                        "id": "47fa3d9f499240abb5dfcf2668f168cd",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:5000/v2.0"
+                    }
+                ],
+                "id": "26d7541715a44a4d9adad96f9872b633",
+                "type": "identity",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "aaa17a539e364297a7845d67c7c7cc4b",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:9292"
+                    },
+                    {
+                        "id": "4fa9620e42394cb1974736dce0856c71",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:9292"
+                    },
+                    {
+                        "id": "9673687f9bc441d88dec37942bfd603b",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:9292"
+                    }
+                ],
+                "id": "d27a41843f4e4b0e8cf6dac4082deb0d",
+                "type": "image",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8080/v1"
+                    },
+                    {
+                        "id": "43bef154594d4ccb8e49014d20624e1d",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8080/v1/AUTH_01257"
+                    },
+                    {
+                        "id": "e63b5f5d7aa3493690189d0ff843b9b3",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8080/v1/AUTH_01257"
+                    }
+                ],
+                "id": "a669e152f1104810a4b6701aade721bb",
+                "type": "object-store",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "51934fe63a5b4ac0a32664f64eb462c3",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8774/v1.1/01257"
+                    },
+                    {
+                        "id": "869b535eea0d42e483ae9da0d868ebad",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8774/v1.1/01257"
+                    },
+                    {
+                        "id": "93583824c18f4263a2245ca432b132a6",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8774/v1.1/01257"
+                    }
+                ],
+                "id": "7f32cc2af6c9476e82d75f80e8b3bbb8",
+                "type": "compute",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "b06997fd08414903ad458836efaa9067",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8773/services/Admin"
+                    },
+                    {
+                        "id": "411f7de7c9a8484c9b46c254fb2676e2",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8773/services/Cloud"
+                    },
+                    {
+                        "id": "f21c93f3da014785854b4126d0109c49",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8773/services/Cloud"
+                    }
+                ],
+                "id": "b08c9c7d4ef543eba5eeb766f72e5aa1",
+                "type": "ec2",
+            },
+            {
+                "endpoints": [
+                    {
+                        "id": "077d82df25304abeac2294004441db5a",
+                        "interface": "admin",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8776/v1/01257"
+                    },
+                    {
+                        "id": "875bf282362c40219665278b4fd11467",
+                        "interface": "internal",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8776/v1/01257"
+                    },
+                    {
+                        "id": "cd229aa6df0640dc858a8026eb7e640c",
+                        "interface": "public",
+                        "region": "RegionOne",
+                        "url": "http://localhost:8776/v1/01257"
+                    }
+                ],
+                "id": "5db21b82617f4a95816064736a7bec22",
+                "type": "volume",
+            }
+        ],
+        "expires_at": "2013-05-22T00:02:43.941430Z",
+        "issued_at": "2013-05-21T00:02:43.941473Z",
+        "methods": [
+            "password"
+        ],
+        "project": {
+            "domain": {
+                "id": "default",
+                "name": "Default"
+            },
+            "id": "01257",
+            "name": "service"
+        },
+        "roles": [
+            {
+                "id": "9fe2ff9ee4384b1894a90878d3e92bab",
+                "name": "_member_"
+            },
+            {
+                "id": "53bff13443bd4450b97f978881d47b18",
+                "name": "admin"
+            }
+        ],
+        "user": {
+            "domain": {
+                "id": "default",
+                "name": "Default"
+            },
+            "id": "f19ddbe2c53c46f189fe66d0a7a9c9ce",
+            "name": "nova"
+        },
+        "OS-TRUST:trust": {
+            "id": "abc123",
+            "trustee_user_id": "123456",
+            "trustor_user_id": "333333",
+            "impersonation": False
+        }
+    },
+    'token_version': 'v3.0'
+}
+
+
+def create_v2_token():
+    return {
+        "access": {
+            "token": {
+                "expires": timeutils.isotime(timeutils.utcnow() +
+                                             FUTURE_DELTA),
+                "issued_at": "2013-05-21T00:02:43.941473Z",
+                "tenant": {
+                    "enabled": True,
+                    "id": "01257",
+                    "name": "service"
+                }
+            }
+        }
+    }
+
+
+SAMPLE_V2_TOKEN_EXPIRED = {
+    "access": {
+        "token": {
+            "expires": timeutils.isotime(CURRENT_DATE),
+            "issued_at": "2013-05-21T00:02:43.941473Z",
+            "tenant": {
+                "enabled": True,
+                "id": "01257",
+                "name": "service"
+            }
+        }
+    }
+}
+
+
+def create_v3_token():
+    return {
+        "token": {
+            'methods': [],
+            "expires_at": timeutils.isotime(timeutils.utcnow() + FUTURE_DELTA),
+            "issued_at": "2013-05-21T00:02:43.941473Z",
+        }
+    }
+
+
+SAMPLE_V3_TOKEN_EXPIRED = {
+    "token": {
+        "expires_at": timeutils.isotime(CURRENT_DATE),
+        "issued_at": "2013-05-21T00:02:43.941473Z",
+    }
+}
+
+SAMPLE_MALFORMED_TOKEN = {
+    "token": {
+        "bogus": {
+            "no expiration data": None
+        }
+    }
+}
+
+
+class TestTokenProvider(tests.TestCase):
+    def setUp(self):
+        super(TestTokenProvider, self).setUp()
+        self.useFixture(database.Database())
+        self.load_backends()
+
+    def test_get_token_version(self):
+        self.assertEqual(
+            token.provider.V2,
+            self.token_provider_api.get_token_version(SAMPLE_V2_TOKEN))
+        self.assertEqual(
+            token.provider.V2,
+            self.token_provider_api.get_token_version(
+                SAMPLE_V2_TOKEN_WITH_EMBEDDED_VERSION))
+        self.assertEqual(
+            token.provider.V3,
+            self.token_provider_api.get_token_version(SAMPLE_V3_TOKEN))
+        self.assertEqual(
+            token.provider.V3,
+            self.token_provider_api.get_token_version(
+                SAMPLE_V3_TOKEN_WITH_EMBEDDED_VERSION))
+        self.assertRaises(exception.UnsupportedTokenVersionException,
+                          self.token_provider_api.get_token_version,
+                          'bogus')
+
+    def test_supported_token_providers(self):
+        # test default config
+
+        dependency.reset()
+        self.assertIsInstance(token.provider.Manager().driver,
+                              uuid.Provider)
+
+        dependency.reset()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.uuid.Provider')
+        token.provider.Manager()
+
+        dependency.reset()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pki.Provider')
+        token.provider.Manager()
+
+        dependency.reset()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pkiz.Provider')
+        token.provider.Manager()
+
+    def test_unsupported_token_provider(self):
+        self.config_fixture.config(group='token',
+                                   provider='my.package.MyProvider')
+        self.assertRaises(ImportError,
+                          token.provider.Manager)
+
+    def test_provider_token_expiration_validation(self):
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._is_valid_token,
+                          SAMPLE_V2_TOKEN_EXPIRED)
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._is_valid_token,
+                          SAMPLE_V3_TOKEN_EXPIRED)
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._is_valid_token,
+                          SAMPLE_MALFORMED_TOKEN)
+        self.assertIsNone(
+            self.token_provider_api._is_valid_token(create_v2_token()))
+        self.assertIsNone(
+            self.token_provider_api._is_valid_token(create_v3_token()))
+
+
+# NOTE(ayoung): renamed to avoid automatic test detection
+class PKIProviderTests(object):
+
+    def setUp(self):
+        super(PKIProviderTests, self).setUp()
+
+        from keystoneclient.common import cms
+        self.cms = cms
+
+        from keystone.common import environment
+        self.environment = environment
+
+        old_cms_subprocess = cms.subprocess
+        self.addCleanup(setattr, cms, 'subprocess', old_cms_subprocess)
+
+        old_env_subprocess = environment.subprocess
+        self.addCleanup(setattr, environment, 'subprocess', old_env_subprocess)
+
+        self.cms.subprocess = self.target_subprocess
+        self.environment.subprocess = self.target_subprocess
+
+        reload(pki)  # force module reload so the imports get re-evaluated
+
+    def test_get_token_id_error_handling(self):
+        # cause command-line failure
+        self.config_fixture.config(group='signing',
+                                   keyfile='--please-break-me')
+
+        provider = pki.Provider()
+        token_data = {}
+        self.assertRaises(exception.UnexpectedError,
+                          provider._get_token_id,
+                          token_data)
+
+
+class TestPKIProviderWithEventlet(PKIProviderTests, tests.TestCase):
+
+    def setUp(self):
+        # force keystoneclient.common.cms to use eventlet's subprocess
+        from eventlet.green import subprocess
+        self.target_subprocess = subprocess
+
+        super(TestPKIProviderWithEventlet, self).setUp()
+
+
+class TestPKIProviderWithStdlib(PKIProviderTests, tests.TestCase):
+
+    def setUp(self):
+        # force keystoneclient.common.cms to use the stdlib subprocess
+        import subprocess
+        self.target_subprocess = subprocess
+
+        super(TestPKIProviderWithStdlib, self).setUp()
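The sample payloads above differ at the top level: v2 tokens nest everything under 'access', v3 tokens under 'token', and the *_WITH_EMBEDDED_VERSION variants additionally carry a 'token_version' marker. A minimal sketch of the dispatch that test_get_token_version exercises (not keystone's implementation):

    def guess_token_version(token_data):
        # An embedded marker is authoritative when present.
        if 'token_version' in token_data:
            return token_data['token_version']
        if 'access' in token_data:
            return 'v2.0'
        if 'token' in token_data:
            return 'v3.0'
        raise ValueError('unrecognized token payload')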
diff --git a/keystone-moon/keystone/tests/unit/test_url_middleware.py b/keystone-moon/keystone/tests/unit/test_url_middleware.py
new file mode 100644 (file)
index 0000000..1b3872b
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from keystone import middleware
+from keystone.tests import unit as tests
+
+
+class FakeApp(object):
+    """Fakes a WSGI app URL normalized."""
+    def __call__(self, env, start_response):
+        resp = webob.Response()
+        resp.body = 'SUCCESS'
+        return resp(env, start_response)
+
+
+class UrlMiddlewareTest(tests.TestCase):
+    def setUp(self):
+        self.middleware = middleware.NormalizingFilter(FakeApp())
+        self.response_status = None
+        self.response_headers = None
+        super(UrlMiddlewareTest, self).setUp()
+
+    def start_fake_response(self, status, headers):
+        self.response_status = int(status.split(' ', 1)[0])
+        self.response_headers = dict(headers)
+
+    def test_trailing_slash_normalization(self):
+        """Tests /v2.0/tokens and /v2.0/tokens/ normalized URLs match."""
+        req1 = webob.Request.blank('/v2.0/tokens')
+        req2 = webob.Request.blank('/v2.0/tokens/')
+        self.middleware(req1.environ, self.start_fake_response)
+        self.middleware(req2.environ, self.start_fake_response)
+        self.assertEqual(req1.path_url, req2.path_url)
+        self.assertEqual('http://localhost/v2.0/tokens', req1.path_url)
+
+    def test_rewrite_empty_path(self):
+        """Tests empty path is rewritten to root."""
+        req = webob.Request.blank('')
+        self.middleware(req.environ, self.start_fake_response)
+        self.assertEqual('http://localhost/', req.path_url)
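Both assertions pin down the same contract: an empty path is rewritten to '/', and a trailing slash on any longer path is stripped before the request reaches the wrapped app. A sketch of that behavior (keystone.middleware.NormalizingFilter is the real implementation; this only restates the contract the tests check):

    class NormalizingSketch(object):
        """Rewrite '' to '/' and drop the trailing slash from longer paths."""

        def __init__(self, app):
            self.app = app

        def __call__(self, environ, start_response):
            path = environ.get('PATH_INFO', '')
            if not path:
                environ['PATH_INFO'] = '/'
            elif len(path) > 1 and path.endswith('/'):
                environ['PATH_INFO'] = path.rstrip('/')
            return self.app(environ, start_response)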
diff --git a/keystone-moon/keystone/tests/unit/test_v2.py b/keystone-moon/keystone/tests/unit/test_v2.py
new file mode 100644 (file)
index 0000000..8c7c379
--- /dev/null
@@ -0,0 +1,1500 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import time
+import uuid
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+import six
+from testtools import matchers
+
+from keystone.common import extension as keystone_extension
+from keystone.tests.unit import ksfixtures
+from keystone.tests.unit import rest
+
+
+CONF = cfg.CONF
+
+
+class CoreApiTests(object):
+    def assertValidError(self, error):
+        self.assertIsNotNone(error.get('code'))
+        self.assertIsNotNone(error.get('title'))
+        self.assertIsNotNone(error.get('message'))
+
+    def assertValidVersion(self, version):
+        self.assertIsNotNone(version)
+        self.assertIsNotNone(version.get('id'))
+        self.assertIsNotNone(version.get('status'))
+        self.assertIsNotNone(version.get('updated'))
+
+    def assertValidExtension(self, extension):
+        self.assertIsNotNone(extension)
+        self.assertIsNotNone(extension.get('name'))
+        self.assertIsNotNone(extension.get('namespace'))
+        self.assertIsNotNone(extension.get('alias'))
+        self.assertIsNotNone(extension.get('updated'))
+
+    def assertValidExtensionLink(self, link):
+        self.assertIsNotNone(link.get('rel'))
+        self.assertIsNotNone(link.get('type'))
+        self.assertIsNotNone(link.get('href'))
+
+    def assertValidTenant(self, tenant):
+        self.assertIsNotNone(tenant.get('id'))
+        self.assertIsNotNone(tenant.get('name'))
+
+    def assertValidUser(self, user):
+        self.assertIsNotNone(user.get('id'))
+        self.assertIsNotNone(user.get('name'))
+
+    def assertValidRole(self, tenant):
+        self.assertIsNotNone(tenant.get('id'))
+        self.assertIsNotNone(tenant.get('name'))
+
+    def test_public_not_found(self):
+        r = self.public_request(
+            path='/%s' % uuid.uuid4().hex,
+            expected_status=404)
+        self.assertValidErrorResponse(r)
+
+    def test_admin_not_found(self):
+        r = self.admin_request(
+            path='/%s' % uuid.uuid4().hex,
+            expected_status=404)
+        self.assertValidErrorResponse(r)
+
+    def test_public_multiple_choice(self):
+        r = self.public_request(path='/', expected_status=300)
+        self.assertValidMultipleChoiceResponse(r)
+
+    def test_admin_multiple_choice(self):
+        r = self.admin_request(path='/', expected_status=300)
+        self.assertValidMultipleChoiceResponse(r)
+
+    def test_public_version(self):
+        r = self.public_request(path='/v2.0/')
+        self.assertValidVersionResponse(r)
+
+    def test_admin_version(self):
+        r = self.admin_request(path='/v2.0/')
+        self.assertValidVersionResponse(r)
+
+    def test_public_extensions(self):
+        r = self.public_request(path='/v2.0/extensions')
+        self.assertValidExtensionListResponse(
+            r, keystone_extension.PUBLIC_EXTENSIONS)
+
+    def test_admin_extensions(self):
+        r = self.admin_request(path='/v2.0/extensions')
+        self.assertValidExtensionListResponse(
+            r, keystone_extension.ADMIN_EXTENSIONS)
+
+    def test_admin_extensions_404(self):
+        self.admin_request(path='/v2.0/extensions/invalid-extension',
+                           expected_status=404)
+
+    def test_public_osksadm_extension_404(self):
+        self.public_request(path='/v2.0/extensions/OS-KSADM',
+                            expected_status=404)
+
+    def test_admin_osksadm_extension(self):
+        r = self.admin_request(path='/v2.0/extensions/OS-KSADM')
+        self.assertValidExtensionResponse(
+            r, keystone_extension.ADMIN_EXTENSIONS)
+
+    def test_authenticate(self):
+        r = self.public_request(
+            method='POST',
+            path='/v2.0/tokens',
+            body={
+                'auth': {
+                    'passwordCredentials': {
+                        'username': self.user_foo['name'],
+                        'password': self.user_foo['password'],
+                    },
+                    'tenantId': self.tenant_bar['id'],
+                },
+            },
+            expected_status=200)
+        self.assertValidAuthenticationResponse(r, require_service_catalog=True)
+
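+    # For reference, the raw request test_authenticate drives is a POST to
+    # the public endpoint (a sketch: host, port, and credential values here
+    # are illustrative, not read from the fixtures):
+    #   curl -X POST http://localhost:5000/v2.0/tokens \
+    #     -H 'Content-Type: application/json' \
+    #     -d '{"auth": {"passwordCredentials": {"username": "foo",
+    #          "password": "secret"}, "tenantId": "bar"}}'
+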
+    def test_authenticate_unscoped(self):
+        r = self.public_request(
+            method='POST',
+            path='/v2.0/tokens',
+            body={
+                'auth': {
+                    'passwordCredentials': {
+                        'username': self.user_foo['name'],
+                        'password': self.user_foo['password'],
+                    },
+                },
+            },
+            expected_status=200)
+        self.assertValidAuthenticationResponse(r)
+
+    def test_get_tenants_for_token(self):
+        r = self.public_request(path='/v2.0/tenants',
+                                token=self.get_scoped_token())
+        self.assertValidTenantListResponse(r)
+
+    def test_validate_token(self):
+        token = self.get_scoped_token()
+        r = self.admin_request(
+            path='/v2.0/tokens/%(token_id)s' % {
+                'token_id': token,
+            },
+            token=token)
+        self.assertValidAuthenticationResponse(r)
+
+    def test_invalid_token_404(self):
+        token = self.get_scoped_token()
+        self.admin_request(
+            path='/v2.0/tokens/%(token_id)s' % {
+                'token_id': 'invalid',
+            },
+            token=token,
+            expected_status=404)
+
+    def test_validate_token_service_role(self):
+        self.md_foobar = self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'],
+            self.tenant_service['id'],
+            self.role_service['id'])
+
+        token = self.get_scoped_token(tenant_id='service')
+        r = self.admin_request(
+            path='/v2.0/tokens/%s' % token,
+            token=token)
+        self.assertValidAuthenticationResponse(r)
+
+    def test_remove_role_revokes_token(self):
+        self.md_foobar = self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'],
+            self.tenant_service['id'],
+            self.role_service['id'])
+
+        token = self.get_scoped_token(tenant_id='service')
+        r = self.admin_request(
+            path='/v2.0/tokens/%s' % token,
+            token=token)
+        self.assertValidAuthenticationResponse(r)
+
+        self.assignment_api.remove_role_from_user_and_project(
+            self.user_foo['id'],
+            self.tenant_service['id'],
+            self.role_service['id'])
+
+        self.admin_request(
+            path='/v2.0/tokens/%s' % token,
+            token=token,
+            expected_status=401)
+
+    def test_validate_token_belongs_to(self):
+        token = self.get_scoped_token()
+        path = ('/v2.0/tokens/%s?belongsTo=%s' % (token,
+                                                  self.tenant_bar['id']))
+        r = self.admin_request(path=path, token=token)
+        self.assertValidAuthenticationResponse(r, require_service_catalog=True)
+
+    def test_validate_token_no_belongs_to_still_returns_catalog(self):
+        token = self.get_scoped_token()
+        path = ('/v2.0/tokens/%s' % token)
+        r = self.admin_request(path=path, token=token)
+        self.assertValidAuthenticationResponse(r, require_service_catalog=True)
+
+    def test_validate_token_head(self):
+        """The same call as above, except using HEAD.
+
+        There's no response to validate here, but this is included for the
+        sake of completely covering the core API.
+
+        """
+        token = self.get_scoped_token()
+        self.admin_request(
+            method='HEAD',
+            path='/v2.0/tokens/%(token_id)s' % {
+                'token_id': token,
+            },
+            token=token,
+            expected_status=200)
+
+    def test_endpoints(self):
+        token = self.get_scoped_token()
+        r = self.admin_request(
+            path='/v2.0/tokens/%(token_id)s/endpoints' % {
+                'token_id': token,
+            },
+            token=token)
+        self.assertValidEndpointListResponse(r)
+
+    def test_get_tenant(self):
+        token = self.get_scoped_token()
+        r = self.admin_request(
+            path='/v2.0/tenants/%(tenant_id)s' % {
+                'tenant_id': self.tenant_bar['id'],
+            },
+            token=token)
+        self.assertValidTenantResponse(r)
+
+    def test_get_tenant_by_name(self):
+        token = self.get_scoped_token()
+        r = self.admin_request(
+            path='/v2.0/tenants?name=%(tenant_name)s' % {
+                'tenant_name': self.tenant_bar['name'],
+            },
+            token=token)
+        self.assertValidTenantResponse(r)
+
+    def test_get_user_roles_with_tenant(self):
+        token = self.get_scoped_token()
+        r = self.admin_request(
+            path='/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
+                'tenant_id': self.tenant_bar['id'],
+                'user_id': self.user_foo['id'],
+            },
+            token=token)
+        self.assertValidRoleListResponse(r)
+
+    def test_get_user(self):
+        token = self.get_scoped_token()
+        r = self.admin_request(
+            path='/v2.0/users/%(user_id)s' % {
+                'user_id': self.user_foo['id'],
+            },
+            token=token)
+        self.assertValidUserResponse(r)
+
+    def test_get_user_by_name(self):
+        token = self.get_scoped_token()
+        r = self.admin_request(
+            path='/v2.0/users?name=%(user_name)s' % {
+                'user_name': self.user_foo['name'],
+            },
+            token=token)
+        self.assertValidUserResponse(r)
+
+    def test_create_update_user_invalid_enabled_type(self):
+        # Enforce usage of boolean for 'enabled' field
+        token = self.get_scoped_token()
+
+        # Test CREATE request
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': uuid.uuid4().hex,
+                    'password': uuid.uuid4().hex,
+                    'enabled': "False",
+                },
+            },
+            token=token,
+            expected_status=400)
+        self.assertValidErrorResponse(r)
+
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': uuid.uuid4().hex,
+                    'password': uuid.uuid4().hex,
+                    # In JSON, 0|1 are not booleans
+                    'enabled': 0,
+                },
+            },
+            token=token,
+            expected_status=400)
+        self.assertValidErrorResponse(r)
+
+        # Test UPDATE request
+        path = '/v2.0/users/%(user_id)s' % {
+            'user_id': self.user_foo['id'],
+        }
+
+        r = self.admin_request(
+            method='PUT',
+            path=path,
+            body={
+                'user': {
+                    'enabled': "False",
+                },
+            },
+            token=token,
+            expected_status=400)
+        self.assertValidErrorResponse(r)
+
+        r = self.admin_request(
+            method='PUT',
+            path=path,
+            body={
+                'user': {
+                    # In JSON, 0|1 are not booleans
+                    'enabled': 1,
+                },
+            },
+            token=token,
+            expected_status=400)
+        self.assertValidErrorResponse(r)
+
+    def test_create_update_user_valid_enabled_type(self):
+        # Enforce usage of boolean for 'enabled' field
+        token = self.get_scoped_token()
+
+        # Test CREATE request
+        self.admin_request(method='POST',
+                           path='/v2.0/users',
+                           body={
+                               'user': {
+                                   'name': uuid.uuid4().hex,
+                                   'password': uuid.uuid4().hex,
+                                   'enabled': False,
+                               },
+                           },
+                           token=token,
+                           expected_status=200)
+
+    def test_error_response(self):
+        """This triggers assertValidErrorResponse by convention."""
+        self.public_request(path='/v2.0/tenants', expected_status=401)
+
+    def test_invalid_parameter_error_response(self):
+        token = self.get_scoped_token()
+        bad_body = {
+            'OS-KSADM:service%s' % uuid.uuid4().hex: {
+                'name': uuid.uuid4().hex,
+                'type': uuid.uuid4().hex,
+            },
+        }
+        res = self.admin_request(method='POST',
+                                 path='/v2.0/OS-KSADM/services',
+                                 body=bad_body,
+                                 token=token,
+                                 expected_status=400)
+        self.assertValidErrorResponse(res)
+        res = self.admin_request(method='POST',
+                                 path='/v2.0/users',
+                                 body=bad_body,
+                                 token=token,
+                                 expected_status=400)
+        self.assertValidErrorResponse(res)
+
+    def _get_user_id(self, r):
+        """Helper method to return user ID from a response.
+
+        This needs to be overridden by child classes
+        based on their content type.
+
+        """
+        raise NotImplementedError()
+
+    def _get_role_id(self, r):
+        """Helper method to return a role ID from a response.
+
+        This needs to be overridden by child classes
+        based on their content type.
+
+        """
+        raise NotImplementedError()
+
+    def _get_role_name(self, r):
+        """Helper method to return role NAME from a response.
+
+        This needs to be overridden by child classes
+        based on their content type.
+
+        """
+        raise NotImplementedError()
+
+    def _get_project_id(self, r):
+        """Helper method to return project ID from a response.
+
+        This needs to be overridden by child classes
+        based on their content type.
+
+        """
+        raise NotImplementedError()
+
+    def assertNoRoles(self, r):
+        """Helper method to assert No Roles
+
+        This needs to be overridden by child classes
+        based on their content type.
+
+        """
+        raise NotImplementedError()
+
+    def test_update_user_tenant(self):
+        token = self.get_scoped_token()
+
+        # Create a new user
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': uuid.uuid4().hex,
+                    'password': uuid.uuid4().hex,
+                    'tenantId': self.tenant_bar['id'],
+                    'enabled': True,
+                },
+            },
+            token=token,
+            expected_status=200)
+
+        user_id = self._get_user_id(r.result)
+
+        # Check if member_role is in tenant_bar
+        r = self.admin_request(
+            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
+                'project_id': self.tenant_bar['id'],
+                'user_id': user_id
+            },
+            token=token,
+            expected_status=200)
+        self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))
+
+        # Create a new tenant
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/tenants',
+            body={
+                'tenant': {
+                    'name': 'test_update_user',
+                    'description': 'A description ...',
+                    'enabled': True,
+                },
+            },
+            token=token,
+            expected_status=200)
+
+        project_id = self._get_project_id(r.result)
+
+        # Update user's tenant
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%(user_id)s' % {
+                'user_id': user_id,
+            },
+            body={
+                'user': {
+                    'tenantId': project_id,
+                },
+            },
+            token=token,
+            expected_status=200)
+
+        # 'member_role' should be in new_tenant
+        r = self.admin_request(
+            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
+                'project_id': project_id,
+                'user_id': user_id
+            },
+            token=token,
+            expected_status=200)
+        self.assertEqual('_member_', self._get_role_name(r.result))
+
+        # 'member_role' should not be in tenant_bar any more
+        r = self.admin_request(
+            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
+                'project_id': self.tenant_bar['id'],
+                'user_id': user_id
+            },
+            token=token,
+            expected_status=200)
+        self.assertNoRoles(r.result)
+
+    def test_update_user_with_invalid_tenant(self):
+        token = self.get_scoped_token()
+
+        # Create a new user
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': 'test_invalid_tenant',
+                    'password': uuid.uuid4().hex,
+                    'tenantId': self.tenant_bar['id'],
+                    'enabled': True,
+                },
+            },
+            token=token,
+            expected_status=200)
+        user_id = self._get_user_id(r.result)
+
+        # Update user with an invalid tenant
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%(user_id)s' % {
+                'user_id': user_id,
+            },
+            body={
+                'user': {
+                    'tenantId': 'abcde12345heha',
+                },
+            },
+            token=token,
+            expected_status=404)
+
+    def test_update_user_with_invalid_tenant_no_prev_tenant(self):
+        token = self.get_scoped_token()
+
+        # Create a new user
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': 'test_invalid_tenant',
+                    'password': uuid.uuid4().hex,
+                    'enabled': True,
+                },
+            },
+            token=token,
+            expected_status=200)
+        user_id = self._get_user_id(r.result)
+
+        # Update user with an invalid tenant
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%(user_id)s' % {
+                'user_id': user_id,
+            },
+            body={
+                'user': {
+                    'tenantId': 'abcde12345heha',
+                },
+            },
+            token=token,
+            expected_status=404)
+
+    def test_update_user_with_old_tenant(self):
+        token = self.get_scoped_token()
+
+        # Create a new user
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': uuid.uuid4().hex,
+                    'password': uuid.uuid4().hex,
+                    'tenantId': self.tenant_bar['id'],
+                    'enabled': True,
+                },
+            },
+            token=token,
+            expected_status=200)
+
+        user_id = self._get_user_id(r.result)
+
+        # Check if member_role is in tenant_bar
+        r = self.admin_request(
+            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
+                'project_id': self.tenant_bar['id'],
+                'user_id': user_id
+            },
+            token=token,
+            expected_status=200)
+        self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))
+
+        # Update user's tenant with old tenant id
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%(user_id)s' % {
+                'user_id': user_id,
+            },
+            body={
+                'user': {
+                    'tenantId': self.tenant_bar['id'],
+                },
+            },
+            token=token,
+            expected_status=200)
+
+        # 'member_role' should still be in tenant_bar
+        r = self.admin_request(
+            path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
+                'project_id': self.tenant_bar['id'],
+                'user_id': user_id
+            },
+            token=token,
+            expected_status=200)
+        self.assertEqual('_member_', self._get_role_name(r.result))
+
+    def test_authenticating_a_user_with_no_password(self):
+        token = self.get_scoped_token()
+
+        username = uuid.uuid4().hex
+
+        # create the user
+        self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': username,
+                    'enabled': True,
+                },
+            },
+            token=token)
+
+        # fail to authenticate
+        r = self.public_request(
+            method='POST',
+            path='/v2.0/tokens',
+            body={
+                'auth': {
+                    'passwordCredentials': {
+                        'username': username,
+                        'password': 'password',
+                    },
+                },
+            },
+            expected_status=401)
+        self.assertValidErrorResponse(r)
+
+    def test_www_authenticate_header(self):
+        r = self.public_request(
+            path='/v2.0/tenants',
+            expected_status=401)
+        self.assertEqual('Keystone uri="http://localhost"',
+                         r.headers.get('WWW-Authenticate'))
+
+    def test_www_authenticate_header_host(self):
+        test_url = 'http://%s:4187' % uuid.uuid4().hex
+        self.config_fixture.config(public_endpoint=test_url)
+        r = self.public_request(
+            path='/v2.0/tenants',
+            expected_status=401)
+        self.assertEqual('Keystone uri="%s"' % test_url,
+                         r.headers.get('WWW-Authenticate'))
+
+
+class LegacyV2UsernameTests(object):
+    """Tests to show the broken username behavior in V2.
+
+    The V2 API is documented to use `username` instead of `name`. The
+    API, however, forced clients to use `name` and let `username` fall
+    into the `extra` field.
+
+    These tests ensure this behavior works so fixes to `username`/`name`
+    will be backward compatible.
+    """
+
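+    # NOTE: an illustrative sketch, assuming the v2 controllers only
+    # recognize 'name': a request body such as
+    #
+    #     {'user': {'name': 'demo', 'username': 'demo-alias'}}
+    #
+    # ends up with 'username' stored in the user's `extra` blob, and the
+    # response echoes it back alongside 'name'.
+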
+    def create_user(self, **user_attrs):
+        """Creates a users and returns the response object.
+
+        :param user_attrs: attributes added to the request body (optional)
+        """
+        token = self.get_scoped_token()
+        body = {
+            'user': {
+                'name': uuid.uuid4().hex,
+                'enabled': True,
+            },
+        }
+        body['user'].update(user_attrs)
+
+        return self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            token=token,
+            body=body,
+            expected_status=200)
+
+    def test_create_with_extra_username(self):
+        """The response for creating a user will contain the extra fields."""
+        fake_username = uuid.uuid4().hex
+        r = self.create_user(username=fake_username)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(fake_username, user.get('username'))
+
+    def test_get_returns_username_from_extra(self):
+        """The response for getting a user will contain the extra fields."""
+        token = self.get_scoped_token()
+
+        fake_username = uuid.uuid4().hex
+        r = self.create_user(username=fake_username)
+
+        id_ = self.get_user_attribute_from_response(r, 'id')
+        r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(fake_username, user.get('username'))
+
+    def test_update_returns_new_username_when_adding_username(self):
+        """The response for updating a user will contain the extra fields.
+
+        This is specifically testing for updating a username when a value
+        was not previously set.
+        """
+        token = self.get_scoped_token()
+
+        r = self.create_user()
+
+        id_ = self.get_user_attribute_from_response(r, 'id')
+        name = self.get_user_attribute_from_response(r, 'name')
+        enabled = self.get_user_attribute_from_response(r, 'enabled')
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%s' % id_,
+            token=token,
+            body={
+                'user': {
+                    'name': name,
+                    'username': 'new_username',
+                    'enabled': enabled,
+                },
+            },
+            expected_status=200)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual('new_username', user.get('username'))
+
+    def test_update_returns_new_username_when_updating_username(self):
+        """The response for updating a user will contain the extra fields.
+
+        This tests updating a username that was previously set.
+        """
+        token = self.get_scoped_token()
+
+        r = self.create_user(username='original_username')
+
+        id_ = self.get_user_attribute_from_response(r, 'id')
+        name = self.get_user_attribute_from_response(r, 'name')
+        enabled = self.get_user_attribute_from_response(r, 'enabled')
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%s' % id_,
+            token=token,
+            body={
+                'user': {
+                    'name': name,
+                    'username': 'new_username',
+                    'enabled': enabled,
+                },
+            },
+            expected_status=200)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual('new_username', user.get('username'))
+
+    def test_username_is_always_returned_create(self):
+        """Username is set as the value of name if no username is provided.
+
+        This matches the v2.0 spec where we really should be using username
+        and not name.
+        """
+        r = self.create_user()
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(user.get('name'), user.get('username'))
+
+    def test_username_is_always_returned_get(self):
+        """Username is set as the value of name if no username is provided.
+
+        This matches the v2.0 spec where we really should be using username
+        and not name.
+        """
+        token = self.get_scoped_token()
+
+        r = self.create_user()
+
+        id_ = self.get_user_attribute_from_response(r, 'id')
+        r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(user.get('name'), user.get('username'))
+
+    def test_username_is_always_returned_get_by_name(self):
+        """Username is set as the value of name if no username is provided.
+
+        This matches the v2.0 spec where we really should be using username
+        and not name.
+        """
+        token = self.get_scoped_token()
+
+        r = self.create_user()
+
+        name = self.get_user_attribute_from_response(r, 'name')
+        r = self.admin_request(path='/v2.0/users?name=%s' % name, token=token)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(user.get('name'), user.get('username'))
+
+    def test_username_is_always_returned_update_no_username_provided(self):
+        """Username is set as the value of name if no username is provided.
+
+        This matches the v2.0 spec where we really should be using username
+        and not name.
+        """
+        token = self.get_scoped_token()
+
+        r = self.create_user()
+
+        id_ = self.get_user_attribute_from_response(r, 'id')
+        name = self.get_user_attribute_from_response(r, 'name')
+        enabled = self.get_user_attribute_from_response(r, 'enabled')
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%s' % id_,
+            token=token,
+            body={
+                'user': {
+                    'name': name,
+                    'enabled': enabled,
+                },
+            },
+            expected_status=200)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(user.get('name'), user.get('username'))
+
+    def test_updated_username_is_returned(self):
+        """Username is set as the value of name if no username is provided.
+
+        This matches the v2.0 spec where we really should be using username
+        and not name.
+        """
+        token = self.get_scoped_token()
+
+        r = self.create_user()
+
+        id_ = self.get_user_attribute_from_response(r, 'id')
+        name = self.get_user_attribute_from_response(r, 'name')
+        enabled = self.get_user_attribute_from_response(r, 'enabled')
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%s' % id_,
+            token=token,
+            body={
+                'user': {
+                    'name': name,
+                    'enabled': enabled,
+                },
+            },
+            expected_status=200)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(user.get('name'), user.get('username'))
+
+    def test_username_can_be_used_instead_of_name_create(self):
+        token = self.get_scoped_token()
+
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            token=token,
+            body={
+                'user': {
+                    'username': uuid.uuid4().hex,
+                    'enabled': True,
+                },
+            },
+            expected_status=200)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(user.get('name'), user.get('username'))
+
+    def test_username_can_be_used_instead_of_name_update(self):
+        token = self.get_scoped_token()
+
+        r = self.create_user()
+
+        id_ = self.get_user_attribute_from_response(r, 'id')
+        new_username = uuid.uuid4().hex
+        enabled = self.get_user_attribute_from_response(r, 'enabled')
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%s' % id_,
+            token=token,
+            body={
+                'user': {
+                    'username': new_username,
+                    'enabled': enabled,
+                },
+            },
+            expected_status=200)
+
+        self.assertValidUserResponse(r)
+
+        user = self.get_user_from_response(r)
+        self.assertEqual(new_username, user.get('name'))
+        self.assertEqual(user.get('name'), user.get('username'))
+
+
+class RestfulTestCase(rest.RestfulTestCase):
+
+    def setUp(self):
+        super(RestfulTestCase, self).setUp()
+
+        # TODO(termie): add an admin user to the fixtures and use that user;
+        # for now, override the fixtures
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'],
+            self.tenant_bar['id'],
+            self.role_admin['id'])
+
+
+class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
+    def _get_user_id(self, r):
+        return r['user']['id']
+
+    def _get_role_name(self, r):
+        return r['roles'][0]['name']
+
+    def _get_role_id(self, r):
+        return r['roles'][0]['id']
+
+    def _get_project_id(self, r):
+        return r['tenant']['id']
+
+    def _get_token_id(self, r):
+        return r.result['access']['token']['id']
+
+    def assertNoRoles(self, r):
+        self.assertEqual([], r['roles'])
+
+    def assertValidErrorResponse(self, r):
+        self.assertIsNotNone(r.result.get('error'))
+        self.assertValidError(r.result['error'])
+        self.assertEqual(r.result['error']['code'], r.status_code)
+
+    def assertValidExtension(self, extension, expected):
+        super(V2TestCase, self).assertValidExtension(extension)
+        descriptions = [ext['description'] for ext in six.itervalues(expected)]
+        description = extension.get('description')
+        self.assertIsNotNone(description)
+        self.assertIn(description, descriptions)
+        self.assertIsNotNone(extension.get('links'))
+        self.assertNotEmpty(extension.get('links'))
+        for link in extension.get('links'):
+            self.assertValidExtensionLink(link)
+
+    def assertValidExtensionListResponse(self, r, expected):
+        self.assertIsNotNone(r.result.get('extensions'))
+        self.assertIsNotNone(r.result['extensions'].get('values'))
+        self.assertNotEmpty(r.result['extensions'].get('values'))
+        for extension in r.result['extensions']['values']:
+            self.assertValidExtension(extension, expected)
+
+    def assertValidExtensionResponse(self, r, expected):
+        self.assertValidExtension(r.result.get('extension'), expected)
+
+    def assertValidUser(self, user):
+        super(V2TestCase, self).assertValidUser(user)
+        self.assertNotIn('default_project_id', user)
+        if 'tenantId' in user:
+            # NOTE(morganfainberg): tenantId should never be "None"; it gets
+            # filtered out of the object if it is there. This is a
+            # belt-and-suspenders check to avoid unintended regressions.
+            self.assertIsNotNone(user.get('tenantId'))
+
+    def assertValidAuthenticationResponse(self, r,
+                                          require_service_catalog=False):
+        self.assertIsNotNone(r.result.get('access'))
+        self.assertIsNotNone(r.result['access'].get('token'))
+        self.assertIsNotNone(r.result['access'].get('user'))
+
+        # validate token
+        self.assertIsNotNone(r.result['access']['token'].get('id'))
+        self.assertIsNotNone(r.result['access']['token'].get('expires'))
+        tenant = r.result['access']['token'].get('tenant')
+        if tenant is not None:
+            # validate tenant
+            self.assertIsNotNone(tenant.get('id'))
+            self.assertIsNotNone(tenant.get('name'))
+
+        # validate user
+        self.assertIsNotNone(r.result['access']['user'].get('id'))
+        self.assertIsNotNone(r.result['access']['user'].get('name'))
+
+        if require_service_catalog:
+            # roles are only provided with a service catalog
+            roles = r.result['access']['user'].get('roles')
+            self.assertNotEmpty(roles)
+            for role in roles:
+                self.assertIsNotNone(role.get('name'))
+
+        serviceCatalog = r.result['access'].get('serviceCatalog')
+        # validate service catalog
+        if require_service_catalog:
+            self.assertIsNotNone(serviceCatalog)
+        if serviceCatalog is not None:
+            self.assertIsInstance(serviceCatalog, list)
+            if require_service_catalog:
+                self.assertNotEmpty(serviceCatalog)
+            for service in r.result['access']['serviceCatalog']:
+                # validate service
+                self.assertIsNotNone(service.get('name'))
+                self.assertIsNotNone(service.get('type'))
+
+                # services contain at least one endpoint
+                self.assertIsNotNone(service.get('endpoints'))
+                self.assertNotEmpty(service['endpoints'])
+                for endpoint in service['endpoints']:
+                    # validate service endpoint
+                    self.assertIsNotNone(endpoint.get('publicURL'))
+
+    def assertValidTenantListResponse(self, r):
+        self.assertIsNotNone(r.result.get('tenants'))
+        self.assertNotEmpty(r.result['tenants'])
+        for tenant in r.result['tenants']:
+            self.assertValidTenant(tenant)
+            self.assertIsNotNone(tenant.get('enabled'))
+            self.assertIn(tenant.get('enabled'), [True, False])
+
+    def assertValidUserResponse(self, r):
+        self.assertIsNotNone(r.result.get('user'))
+        self.assertValidUser(r.result['user'])
+
+    def assertValidTenantResponse(self, r):
+        self.assertIsNotNone(r.result.get('tenant'))
+        self.assertValidTenant(r.result['tenant'])
+
+    def assertValidRoleListResponse(self, r):
+        self.assertIsNotNone(r.result.get('roles'))
+        self.assertNotEmpty(r.result['roles'])
+        for role in r.result['roles']:
+            self.assertValidRole(role)
+
+    def assertValidVersion(self, version):
+        super(V2TestCase, self).assertValidVersion(version)
+
+        self.assertIsNotNone(version.get('links'))
+        self.assertNotEmpty(version.get('links'))
+        for link in version.get('links'):
+            self.assertIsNotNone(link.get('rel'))
+            self.assertIsNotNone(link.get('href'))
+
+        self.assertIsNotNone(version.get('media-types'))
+        self.assertNotEmpty(version.get('media-types'))
+        for media in version.get('media-types'):
+            self.assertIsNotNone(media.get('base'))
+            self.assertIsNotNone(media.get('type'))
+
+    def assertValidMultipleChoiceResponse(self, r):
+        self.assertIsNotNone(r.result.get('versions'))
+        self.assertIsNotNone(r.result['versions'].get('values'))
+        self.assertNotEmpty(r.result['versions']['values'])
+        for version in r.result['versions']['values']:
+            self.assertValidVersion(version)
+
+    def assertValidVersionResponse(self, r):
+        self.assertValidVersion(r.result.get('version'))
+
+    def assertValidEndpointListResponse(self, r):
+        self.assertIsNotNone(r.result.get('endpoints'))
+        self.assertNotEmpty(r.result['endpoints'])
+        for endpoint in r.result['endpoints']:
+            self.assertIsNotNone(endpoint.get('id'))
+            self.assertIsNotNone(endpoint.get('name'))
+            self.assertIsNotNone(endpoint.get('type'))
+            self.assertIsNotNone(endpoint.get('publicURL'))
+            self.assertIsNotNone(endpoint.get('internalURL'))
+            self.assertIsNotNone(endpoint.get('adminURL'))
+
+    def get_user_from_response(self, r):
+        return r.result.get('user')
+
+    def get_user_attribute_from_response(self, r, attribute_name):
+        return r.result['user'][attribute_name]
+
+    def test_service_crud_requires_auth(self):
+        """Service CRUD should 401 without an X-Auth-Token (bug 1006822)."""
+        # values here don't matter because we should 401 before they're checked
+        service_path = '/v2.0/OS-KSADM/services/%s' % uuid.uuid4().hex
+        service_body = {
+            'OS-KSADM:service': {
+                'name': uuid.uuid4().hex,
+                'type': uuid.uuid4().hex,
+            },
+        }
+
+        r = self.admin_request(method='GET',
+                               path='/v2.0/OS-KSADM/services',
+                               expected_status=401)
+        self.assertValidErrorResponse(r)
+
+        r = self.admin_request(method='POST',
+                               path='/v2.0/OS-KSADM/services',
+                               body=service_body,
+                               expected_status=401)
+        self.assertValidErrorResponse(r)
+
+        r = self.admin_request(method='GET',
+                               path=service_path,
+                               expected_status=401)
+        self.assertValidErrorResponse(r)
+
+        r = self.admin_request(method='DELETE',
+                               path=service_path,
+                               expected_status=401)
+        self.assertValidErrorResponse(r)
+
+    def test_user_role_list_requires_auth(self):
+        """User role list should 401 without an X-Auth-Token (bug 1006815)."""
+        # values here don't matter because we should 401 before they're checked
+        path = '/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
+            'tenant_id': uuid.uuid4().hex,
+            'user_id': uuid.uuid4().hex,
+        }
+
+        r = self.admin_request(path=path, expected_status=401)
+        self.assertValidErrorResponse(r)
+
+    def test_fetch_revocation_list_nonadmin_fails(self):
+        self.admin_request(
+            method='GET',
+            path='/v2.0/tokens/revoked',
+            expected_status=401)
+
+    def test_fetch_revocation_list_admin_200(self):
+        token = self.get_scoped_token()
+        r = self.admin_request(
+            method='GET',
+            path='/v2.0/tokens/revoked',
+            token=token,
+            expected_status=200)
+        self.assertValidRevocationListResponse(r)
+
+    def assertValidRevocationListResponse(self, response):
+        self.assertIsNotNone(response.result['signed'])
+
+    def _fetch_parse_revocation_list(self):
+
+        token1 = self.get_scoped_token()
+
+        # TODO(morganfainberg): Because this is making a RESTful call to the
+        # app, a change to UTCNOW via mock.patch will not affect the returned
+        # token. The only surefire way to ensure there is not a transient bug
+        # based upon when the second token is issued is with a sleep. This
+        # issue stems from the limited resolution (no microseconds) on the
+        # expiry time of tokens and the way revocation events utilize token
+        # expiry to revoke individual tokens. This is a stop-gap until all
+        # associated issues with resolution on expiration and revocation
+        # events are resolved.
+        time.sleep(1)
+
+        token2 = self.get_scoped_token()
+
+        self.admin_request(method='DELETE',
+                           path='/v2.0/tokens/%s' % token2,
+                           token=token1)
+
+        r = self.admin_request(
+            method='GET',
+            path='/v2.0/tokens/revoked',
+            token=token1,
+            expected_status=200)
+        signed_text = r.result['signed']
+
+        data_json = cms.cms_verify(signed_text, CONF.signing.certfile,
+                                   CONF.signing.ca_certs)
+
+        data = json.loads(data_json)
+
+        return (data, token2)
+
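+    # NOTE: inferred from the assertions below, the CMS-verified payload is
+    # expected to have roughly the shape
+    #
+    #     {'revoked': [{'id': '<hashed token>', 'expires': '<timestamp>'}]}
+    #
+    # which is why the md5/sha256 tests compare data['revoked'][0]['id']
+    # against a locally computed token hash.
+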
+    def test_fetch_revocation_list_md5(self):
+        """If the server is configured for md5, then the revocation list has
+           tokens hashed with MD5.
+        """
+
+        # The default hash algorithm is md5.
+        hash_algorithm = 'md5'
+
+        (data, token) = self._fetch_parse_revocation_list()
+        token_hash = cms.cms_hash_token(token, mode=hash_algorithm)
+        self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))
+
+    def test_fetch_revocation_list_sha256(self):
+        """If the server is configured for sha256, then the revocation list has
+           tokens hashed with SHA256
+        """
+
+        hash_algorithm = 'sha256'
+        self.config_fixture.config(group='token',
+                                   hash_algorithm=hash_algorithm)
+
+        (data, token) = self._fetch_parse_revocation_list()
+        token_hash = cms.cms_hash_token(token, mode=hash_algorithm)
+        self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))
+
+    def test_create_update_user_invalid_enabled_type(self):
+        # Enforce usage of boolean for 'enabled' field
+        token = self.get_scoped_token()
+
+        # Test CREATE request
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': uuid.uuid4().hex,
+                    'password': uuid.uuid4().hex,
+                    # In JSON, "true|false" are not boolean
+                    'enabled': "true",
+                },
+            },
+            token=token,
+            expected_status=400)
+        self.assertValidErrorResponse(r)
+
+        # Test UPDATE request
+        r = self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%(user_id)s' % {
+                'user_id': self.user_foo['id'],
+            },
+            body={
+                'user': {
+                    # In JSON, "true|false" are not boolean
+                    'enabled': "true",
+                },
+            },
+            token=token,
+            expected_status=400)
+        self.assertValidErrorResponse(r)
+
+    def test_authenticating_a_user_with_an_OSKSADM_password(self):
+        token = self.get_scoped_token()
+
+        username = uuid.uuid4().hex
+        password = uuid.uuid4().hex
+
+        # create the user
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/users',
+            body={
+                'user': {
+                    'name': username,
+                    'OS-KSADM:password': password,
+                    'enabled': True,
+                },
+            },
+            token=token)
+
+        # successfully authenticate
+        self.public_request(
+            method='POST',
+            path='/v2.0/tokens',
+            body={
+                'auth': {
+                    'passwordCredentials': {
+                        'username': username,
+                        'password': password,
+                    },
+                },
+            },
+            expected_status=200)
+
+        # ensure password doesn't leak
+        user_id = r.result['user']['id']
+        r = self.admin_request(
+            method='GET',
+            path='/v2.0/users/%s' % user_id,
+            token=token,
+            expected_status=200)
+        self.assertNotIn('OS-KSADM:password', r.result['user'])
+
+    def test_updating_a_user_with_an_OSKSADM_password(self):
+        token = self.get_scoped_token()
+
+        user_id = self.user_foo['id']
+        password = uuid.uuid4().hex
+
+        # update the user
+        self.admin_request(
+            method='PUT',
+            path='/v2.0/users/%s/OS-KSADM/password' % user_id,
+            body={
+                'user': {
+                    'password': password,
+                },
+            },
+            token=token,
+            expected_status=200)
+
+        # successfully authenticate
+        self.public_request(
+            method='POST',
+            path='/v2.0/tokens',
+            body={
+                'auth': {
+                    'passwordCredentials': {
+                        'username': self.user_foo['name'],
+                        'password': password,
+                    },
+                },
+            },
+            expected_status=200)
+
+
+class RevokeApiTestCase(V2TestCase):
+    def config_overrides(self):
+        super(RevokeApiTestCase, self).config_overrides()
+        self.config_fixture.config(
+            group='revoke',
+            driver='keystone.contrib.revoke.backends.kvs.Revoke')
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pki.Provider',
+            revoke_by_id=False)
+
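+    # NOTE: presumably, with the revoke extension handling revocation events
+    # and revoke_by_id disabled, the signed revocation list is no longer
+    # maintained, hence the skips below.
+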
+    def test_fetch_revocation_list_admin_200(self):
+        self.skipTest('Revoke API disables revocation_list.')
+
+    def test_fetch_revocation_list_md5(self):
+        self.skipTest('Revoke API disables revocation_list.')
+
+    def test_fetch_revocation_list_sha256(self):
+        self.skipTest('Revoke API disables revocation_list.')
+
+
+class TestFernetTokenProviderV2(RestfulTestCase):
+
+    def setUp(self):
+        super(TestFernetTokenProviderV2, self).setUp()
+        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
+    # Used by RestfulTestCase
+    def _get_token_id(self, r):
+        return r.result['access']['token']['id']
+
+    def new_project_ref(self):
+        return {'id': uuid.uuid4().hex,
+                'name': uuid.uuid4().hex,
+                'description': uuid.uuid4().hex,
+                'domain_id': 'default',
+                'enabled': True}
+
+    def config_overrides(self):
+        super(TestFernetTokenProviderV2, self).config_overrides()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.fernet.Provider')
+
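+    # NOTE: assuming standard Fernet behavior, tokens are self-contained
+    # encrypted payloads rather than persisted UUIDs, so their length varies
+    # with the data encoded in them; the assertions below keep them under
+    # 255 characters (e.g., to fit typical header and column size limits).
+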
+    def test_authenticate_unscoped_token(self):
+        unscoped_token = self.get_unscoped_token()
+        # Fernet tokens must be shorter than 255 characters per usability
+        # requirements
+        self.assertLess(len(unscoped_token), 255)
+
+    def test_validate_unscoped_token(self):
+        # Grab an admin token to validate with
+        project_ref = self.new_project_ref()
+        self.resource_api.create_project(project_ref['id'], project_ref)
+        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
+                                                         project_ref['id'],
+                                                         self.role_admin['id'])
+        admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
+        unscoped_token = self.get_unscoped_token()
+        path = ('/v2.0/tokens/%s' % unscoped_token)
+        self.admin_request(
+            method='GET',
+            path=path,
+            token=admin_token,
+            expected_status=200)
+
+    def test_authenticate_scoped_token(self):
+        project_ref = self.new_project_ref()
+        self.resource_api.create_project(project_ref['id'], project_ref)
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], project_ref['id'], self.role_service['id'])
+        token = self.get_scoped_token(tenant_id=project_ref['id'])
+        # Fernet tokens must be shorter than 255 characters per usability
+        # requirements
+        self.assertLess(len(token), 255)
+
+    def test_validate_scoped_token(self):
+        project_ref = self.new_project_ref()
+        self.resource_api.create_project(project_ref['id'], project_ref)
+        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
+                                                         project_ref['id'],
+                                                         self.role_admin['id'])
+        project2_ref = self.new_project_ref()
+        self.resource_api.create_project(project2_ref['id'], project2_ref)
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'], project2_ref['id'], self.role_member['id'])
+        admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
+        member_token = self.get_scoped_token(tenant_id=project2_ref['id'])
+        path = ('/v2.0/tokens/%s?belongsTo=%s' % (member_token,
+                project2_ref['id']))
+        # Validate token belongs to project
+        self.admin_request(
+            method='GET',
+            path=path,
+            token=admin_token,
+            expected_status=200)
+
+    def test_token_authentication_and_validation(self):
+        """Test token authentication for Fernet token provider.
+
+        Verify that token authentication returns a valid response code and
+        that the issued token belongs to the project.
+        """
+        project_ref = self.new_project_ref()
+        self.resource_api.create_project(project_ref['id'], project_ref)
+        unscoped_token = self.get_unscoped_token()
+        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
+                                                         project_ref['id'],
+                                                         self.role_admin['id'])
+        r = self.public_request(
+            method='POST',
+            path='/v2.0/tokens',
+            body={
+                'auth': {
+                    'tenantName': project_ref['name'],
+                    'token': {
+                        'id': unscoped_token.encode('ascii')
+                    }
+                }
+            },
+            expected_status=200)
+
+        token_id = self._get_token_id(r)
+        path = ('/v2.0/tokens/%s?belongsTo=%s' % (token_id, project_ref['id']))
+        # Validate token belongs to project
+        self.admin_request(
+            method='GET',
+            path=path,
+            token=CONF.admin_token,
+            expected_status=200)
diff --git a/keystone-moon/keystone/tests/unit/test_v2_controller.py b/keystone-moon/keystone/tests/unit/test_v2_controller.py
new file mode 100644 (file)
index 0000000..6c1edd0
--- /dev/null
@@ -0,0 +1,95 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import uuid
+
+from keystone.assignment import controllers as assignment_controllers
+from keystone.resource import controllers as resource_controllers
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+
+
+_ADMIN_CONTEXT = {'is_admin': True, 'query_string': {}}
+
+
+class TenantTestCase(tests.TestCase):
+    """Tests for the V2 Tenant controller.
+
+    These tests exercise :class:`keystone.assignment.controllers.Tenant`.
+
+    """
+    def setUp(self):
+        super(TenantTestCase, self).setUp()
+        self.useFixture(database.Database())
+        self.load_backends()
+        self.load_fixtures(default_fixtures)
+        self.tenant_controller = resource_controllers.Tenant()
+        self.assignment_tenant_controller = (
+            assignment_controllers.TenantAssignment())
+        self.assignment_role_controller = (
+            assignment_controllers.RoleAssignmentV2())
+
+    def test_get_project_users_no_user(self):
+        """get_project_users when user doesn't exist.
+
+        When a user that's not known to `identity` has a role on a project,
+        then `get_project_users` just skips that user.
+
+        """
+        project_id = self.tenant_bar['id']
+
+        orig_project_users = (
+            self.assignment_tenant_controller.get_project_users(_ADMIN_CONTEXT,
+                                                                project_id))
+
+        # Assign a role on the `bar` project to a user that doesn't exist.
+
+        user_id = uuid.uuid4().hex
+        self.assignment_role_controller.add_role_to_user(
+            _ADMIN_CONTEXT, user_id, self.role_other['id'], project_id)
+
+        new_project_users = (
+            self.assignment_tenant_controller.get_project_users(_ADMIN_CONTEXT,
+                                                                project_id))
+
+        # The unknown user isn't included in the result, so the list is
+        # unchanged. Assert that the expected values appear in the list
+        # without asserting the order of the results.
+        self.assertEqual(sorted(orig_project_users), sorted(new_project_users))
+
+    def test_list_projects_default_domain(self):
+        """Test that list projects only returns those in the default domain."""
+
+        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                  'enabled': True}
+        self.resource_api.create_domain(domain['id'], domain)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        # Check the real total number of projects; we should have the above
+        # plus those in the default fixtures.
+        refs = self.resource_api.list_projects()
+        self.assertEqual(len(default_fixtures.TENANTS) + 1, len(refs))
+
+        # Now list all projects using the v2 API - we should only get
+        # back those in the default fixtures, since only those are in the
+        # default domain.
+        refs = self.tenant_controller.get_all_projects(_ADMIN_CONTEXT)
+        self.assertEqual(len(default_fixtures.TENANTS), len(refs['tenants']))
+        for tenant in default_fixtures.TENANTS:
+            tenant_copy = tenant.copy()
+            tenant_copy.pop('domain_id')
+            self.assertIn(tenant_copy, refs['tenants'])
diff --git a/keystone-moon/keystone/tests/unit/test_v2_keystoneclient.py b/keystone-moon/keystone/tests/unit/test_v2_keystoneclient.py
new file mode 100644 (file)
index 0000000..7abc5bc
--- /dev/null
@@ -0,0 +1,1045 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from keystoneclient import exceptions as client_exceptions
+from keystoneclient.v2_0 import client as ks_client
+import mock
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+import webob
+
+from keystone.tests import unit as tests
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import appserver
+from keystone.tests.unit.ksfixtures import database
+
+
+CONF = cfg.CONF
+DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
+
+
+class ClientDrivenTestCase(tests.TestCase):
+
+    def setUp(self):
+        super(ClientDrivenTestCase, self).setUp()
+
+        # FIXME(morganfainberg): Since we are running tests through the
+        # controllers and some internal api drivers are SQL-only, the correct
+        # approach is to ensure we have the correct backing store. The
+        # credential api makes some very SQL specific assumptions that should
+        # be addressed allowing for non-SQL based testing to occur.
+        self.useFixture(database.Database())
+        self.load_backends()
+
+        self.load_fixtures(default_fixtures)
+
+        # TODO(termie): add an admin user to the fixtures and use that user;
+        # for now, override the fixtures
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'],
+            self.tenant_bar['id'],
+            self.role_admin['id'])
+
+        conf = self._paste_config('keystone')
+        fixture = self.useFixture(appserver.AppServer(conf, appserver.MAIN))
+        self.public_server = fixture.server
+        fixture = self.useFixture(appserver.AppServer(conf, appserver.ADMIN))
+        self.admin_server = fixture.server
+
+        self.addCleanup(self.cleanup_instance('public_server', 'admin_server'))
+
+    def _public_url(self):
+        public_port = self.public_server.socket_info['socket'][1]
+        return "http://localhost:%s/v2.0" % public_port
+
+    def _admin_url(self):
+        admin_port = self.admin_server.socket_info['socket'][1]
+        return "http://localhost:%s/v2.0" % admin_port
+
+    def _client(self, admin=False, **kwargs):
+        url = self._admin_url() if admin else self._public_url()
+        kc = ks_client.Client(endpoint=url,
+                              auth_url=self._public_url(),
+                              **kwargs)
+        kc.authenticate()
+        # have to manually overwrite the management url after authentication
+        kc.management_url = url
+        return kc
+
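+    # NOTE: presumably the service catalog returned during authentication
+    # points at the statically configured endpoints rather than the
+    # ephemeral ports the test appservers bind to, which is why
+    # management_url is overwritten manually above.
+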
+    def get_client(self, user_ref=None, tenant_ref=None, admin=False):
+        if user_ref is None:
+            user_ref = self.user_foo
+        if tenant_ref is None:
+            for user in default_fixtures.USERS:
+                # The fixture ID is no longer used as the ID in the database.
+                # The fixture ID, however, is still used as part of the
+                # attribute name when storing the created object on the test
+                # case. This means that we need to use the fixture ID below to
+                # find the actual object so that we can get the ID as stored
+                # in the database to compare against.
+                if (getattr(self, 'user_%s' % user['id'])['id'] ==
+                        user_ref['id']):
+                    tenant_id = user['tenants'][0]
+        else:
+            tenant_id = tenant_ref['id']
+
+        return self._client(username=user_ref['name'],
+                            password=user_ref['password'],
+                            tenant_id=tenant_id,
+                            admin=admin)
+
+    def test_authenticate_tenant_name_and_tenants(self):
+        client = self.get_client()
+        tenants = client.tenants.list()
+        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
+
+    def test_authenticate_tenant_id_and_tenants(self):
+        client = self._client(username=self.user_foo['name'],
+                              password=self.user_foo['password'],
+                              tenant_id='bar')
+        tenants = client.tenants.list()
+        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
+
+    def test_authenticate_invalid_tenant_id(self):
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client,
+                          username=self.user_foo['name'],
+                          password=self.user_foo['password'],
+                          tenant_id='baz')
+
+    def test_authenticate_token_no_tenant(self):
+        client = self.get_client()
+        token = client.auth_token
+        token_client = self._client(token=token)
+        tenants = token_client.tenants.list()
+        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
+
+    def test_authenticate_token_tenant_id(self):
+        client = self.get_client()
+        token = client.auth_token
+        token_client = self._client(token=token, tenant_id='bar')
+        tenants = token_client.tenants.list()
+        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
+
+    def test_authenticate_token_invalid_tenant_id(self):
+        client = self.get_client()
+        token = client.auth_token
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client, token=token,
+                          tenant_id=uuid.uuid4().hex)
+
+    def test_authenticate_token_invalid_tenant_name(self):
+        client = self.get_client()
+        token = client.auth_token
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client, token=token,
+                          tenant_name=uuid.uuid4().hex)
+
+    def test_authenticate_token_tenant_name(self):
+        client = self.get_client()
+        token = client.auth_token
+        token_client = self._client(token=token, tenant_name='BAR')
+        tenants = token_client.tenants.list()
+        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
+
+    def test_authenticate_and_delete_token(self):
+        client = self.get_client(admin=True)
+        token = client.auth_token
+        token_client = self._client(token=token)
+        tenants = token_client.tenants.list()
+        self.assertEqual(self.tenant_bar['id'], tenants[0].id)
+
+        client.tokens.delete(token_client.auth_token)
+
+        self.assertRaises(client_exceptions.Unauthorized,
+                          token_client.tenants.list)
+
+    def test_authenticate_no_password(self):
+        user_ref = self.user_foo.copy()
+        user_ref['password'] = None
+        self.assertRaises(client_exceptions.AuthorizationFailure,
+                          self.get_client,
+                          user_ref)
+
+    def test_authenticate_no_username(self):
+        user_ref = self.user_foo.copy()
+        user_ref['name'] = None
+        self.assertRaises(client_exceptions.AuthorizationFailure,
+                          self.get_client,
+                          user_ref)
+
+    def test_authenticate_disabled_tenant(self):
+        admin_client = self.get_client(admin=True)
+
+        tenant = {
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'enabled': False,
+        }
+        tenant_ref = admin_client.tenants.create(
+            tenant_name=tenant['name'],
+            description=tenant['description'],
+            enabled=tenant['enabled'])
+        tenant['id'] = tenant_ref.id
+
+        user = {
+            'name': uuid.uuid4().hex,
+            'password': uuid.uuid4().hex,
+            'email': uuid.uuid4().hex,
+            'tenant_id': tenant['id'],
+        }
+        user_ref = admin_client.users.create(
+            name=user['name'],
+            password=user['password'],
+            email=user['email'],
+            tenant_id=user['tenant_id'])
+        user['id'] = user_ref.id
+
+        # password authentication
+        self.assertRaises(
+            client_exceptions.Unauthorized,
+            self._client,
+            username=user['name'],
+            password=user['password'],
+            tenant_id=tenant['id'])
+
+        # token authentication
+        client = self._client(
+            username=user['name'],
+            password=user['password'])
+        self.assertRaises(
+            client_exceptions.Unauthorized,
+            self._client,
+            token=client.auth_token,
+            tenant_id=tenant['id'])
+
+    # FIXME(ja): this test should require the "keystone:admin" role
+    #            (probably the role set via --keystone_admin_role flag)
+    # FIXME(ja): add a test that admin endpoint is only sent to admin user
+    # FIXME(ja): add a test that admin endpoint returns unauthorized if not
+    #            admin
+    def test_tenant_create_update_and_delete(self):
+        tenant_name = 'original_tenant'
+        tenant_description = 'My original tenant!'
+        tenant_enabled = True
+        client = self.get_client(admin=True)
+
+        # create, get, and list a tenant
+        tenant = client.tenants.create(tenant_name=tenant_name,
+                                       description=tenant_description,
+                                       enabled=tenant_enabled)
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertEqual(tenant_enabled, tenant.enabled)
+
+        tenant = client.tenants.get(tenant_id=tenant.id)
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertEqual(tenant_enabled, tenant.enabled)
+
+        tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertEqual(tenant_enabled, tenant.enabled)
+
+        # update, get, and list a tenant
+        tenant_name = 'updated_tenant'
+        tenant_description = 'Updated tenant!'
+        tenant_enabled = False
+        tenant = client.tenants.update(tenant_id=tenant.id,
+                                       tenant_name=tenant_name,
+                                       enabled=tenant_enabled,
+                                       description=tenant_description)
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertEqual(tenant_enabled, tenant.enabled)
+
+        tenant = client.tenants.get(tenant_id=tenant.id)
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertEqual(tenant_enabled, tenant.enabled)
+
+        tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertEqual(tenant_enabled, tenant.enabled)
+
+        # delete, get, and list a tenant
+        client.tenants.delete(tenant=tenant.id)
+        self.assertRaises(client_exceptions.NotFound, client.tenants.get,
+                          tenant.id)
+        self.assertFalse([t for t in client.tenants.list()
+                         if t.id == tenant.id])
+
+    def test_tenant_create_update_and_delete_unicode(self):
+        tenant_name = u'original \u540d\u5b57'
+        tenant_description = 'My original tenant!'
+        tenant_enabled = True
+        client = self.get_client(admin=True)
+
+        # create, get, and list a tenant
+        tenant = client.tenants.create(tenant_name,
+                                       description=tenant_description,
+                                       enabled=tenant_enabled)
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertIs(tenant.enabled, tenant_enabled)
+
+        tenant = client.tenants.get(tenant.id)
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertIs(tenant.enabled, tenant_enabled)
+
+        # multiple tenants exist due to fixtures, so find the one we're testing
+        tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertIs(tenant.enabled, tenant_enabled)
+
+        # update, get, and list a tenant
+        tenant_name = u'updated \u540d\u5b57'
+        tenant_description = 'Updated tenant!'
+        tenant_enabled = False
+        tenant = client.tenants.update(tenant.id,
+                                       tenant_name=tenant_name,
+                                       enabled=tenant_enabled,
+                                       description=tenant_description)
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertIs(tenant.enabled, tenant_enabled)
+
+        tenant = client.tenants.get(tenant.id)
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertIs(tenant.enabled, tenant_enabled)
+
+        tenant = [t for t in client.tenants.list() if t.id == tenant.id].pop()
+        self.assertEqual(tenant_name, tenant.name)
+        self.assertEqual(tenant_description, tenant.description)
+        self.assertIs(tenant.enabled, tenant_enabled)
+
+        # delete, get, and list a tenant
+        client.tenants.delete(tenant.id)
+        self.assertRaises(client_exceptions.NotFound, client.tenants.get,
+                          tenant.id)
+        self.assertFalse([t for t in client.tenants.list()
+                         if t.id == tenant.id])
+
+    def test_tenant_create_no_name(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.tenants.create,
+                          tenant_name="")
+
+    def test_tenant_delete_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.tenants.delete,
+                          tenant=uuid.uuid4().hex)
+
+    def test_tenant_get_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.tenants.get,
+                          tenant_id=uuid.uuid4().hex)
+
+    def test_tenant_update_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.tenants.update,
+                          tenant_id=uuid.uuid4().hex)
+
+    def test_tenant_list(self):
+        client = self.get_client()
+        tenants = client.tenants.list()
+        self.assertEqual(1, len(tenants))
+
+        # Admin endpoint should return *all* tenants
+        client = self.get_client(admin=True)
+        tenants = client.tenants.list()
+        self.assertEqual(len(default_fixtures.TENANTS), len(tenants))
+
+    def test_invalid_password(self):
+        good_client = self._client(username=self.user_foo['name'],
+                                   password=self.user_foo['password'])
+        good_client.tenants.list()
+
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client,
+                          username=self.user_foo['name'],
+                          password=uuid.uuid4().hex)
+
+    def test_invalid_user_and_password(self):
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client,
+                          username=uuid.uuid4().hex,
+                          password=uuid.uuid4().hex)
+
+    def test_change_password_invalidates_token(self):
+        admin_client = self.get_client(admin=True)
+
+        username = uuid.uuid4().hex
+        password = uuid.uuid4().hex
+        user = admin_client.users.create(name=username, password=password,
+                                         email=uuid.uuid4().hex)
+
+        # auth as user should work before a password change
+        client = self._client(username=username, password=password)
+
+        # auth as user with a token should work before a password change
+        self._client(token=client.auth_token)
+
+        # administrative password reset
+        admin_client.users.update_password(
+            user=user.id,
+            password=uuid.uuid4().hex)
+
+        # auth as user with original password should not work after change
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client,
+                          username=username,
+                          password=password)
+
+        # authenticating with an old token should not work after the change
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client,
+                          token=client.auth_token)
+
+    def test_user_change_own_password_invalidates_token(self):
+        # bootstrap a user as admin
+        client = self.get_client(admin=True)
+        username = uuid.uuid4().hex
+        password = uuid.uuid4().hex
+        client.users.create(name=username, password=password,
+                            email=uuid.uuid4().hex)
+
+        # auth as user should work before a password change
+        client = self._client(username=username, password=password)
+
+        # auth as user with a token should work before a password change
+        self._client(token=client.auth_token)
+
+        # change the user's own password
+        # TODO(dolphm): This should NOT raise an HTTPError at all, but rather
+        # this should succeed with a 2xx. This 500 does not prevent the test
+        # from demonstrating the desired consequences below, though.
+        self.assertRaises(client_exceptions.HTTPError,
+                          client.users.update_own_password,
+                          password, uuid.uuid4().hex)
+
+        # auth as user with original password should not work after change
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client,
+                          username=username,
+                          password=password)
+
+        # auth as user with an old token should not work after change
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client,
+                          token=client.auth_token)
+
+    def test_disable_tenant_invalidates_token(self):
+        admin_client = self.get_client(admin=True)
+        foo_client = self.get_client(self.user_foo)
+        tenant_bar = admin_client.tenants.get(self.tenant_bar['id'])
+
+        # Disable the tenant.
+        tenant_bar.update(enabled=False)
+
+        # Test that the token has been removed.
+        self.assertRaises(client_exceptions.Unauthorized,
+                          foo_client.tokens.authenticate,
+                          token=foo_client.auth_token)
+
+        # Test that the user access has been disabled.
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self.get_client,
+                          self.user_foo)
+
+    def test_delete_tenant_invalidates_token(self):
+        admin_client = self.get_client(admin=True)
+        foo_client = self.get_client(self.user_foo)
+        tenant_bar = admin_client.tenants.get(self.tenant_bar['id'])
+
+        # Delete the tenant.
+        tenant_bar.delete()
+
+        # Test that the token has been removed.
+        self.assertRaises(client_exceptions.Unauthorized,
+                          foo_client.tokens.authenticate,
+                          token=foo_client.auth_token)
+
+        # Test that the user access has been disabled.
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self.get_client,
+                          self.user_foo)
+
+    def test_disable_user_invalidates_token(self):
+        admin_client = self.get_client(admin=True)
+        foo_client = self.get_client(self.user_foo)
+
+        admin_client.users.update_enabled(user=self.user_foo['id'],
+                                          enabled=False)
+
+        self.assertRaises(client_exceptions.Unauthorized,
+                          foo_client.tokens.authenticate,
+                          token=foo_client.auth_token)
+
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self.get_client,
+                          self.user_foo)
+
+    def test_delete_user_invalidates_token(self):
+        admin_client = self.get_client(admin=True)
+        client = self.get_client(admin=False)
+
+        username = uuid.uuid4().hex
+        password = uuid.uuid4().hex
+        user_id = admin_client.users.create(
+            name=username, password=password, email=uuid.uuid4().hex).id
+
+        token_id = client.tokens.authenticate(
+            username=username, password=password).id
+
+        # token should be usable before the user is deleted
+        client.tokens.authenticate(token=token_id)
+
+        admin_client.users.delete(user=user_id)
+
+        # authenticating with a token should not work after the user is deleted
+        self.assertRaises(client_exceptions.Unauthorized,
+                          client.tokens.authenticate,
+                          token=token_id)
+
+    @mock.patch.object(timeutils, 'utcnow')
+    def test_token_expiry_maintained(self, mock_utcnow):
+        now = datetime.datetime.utcnow()
+        mock_utcnow.return_value = now
+        foo_client = self.get_client(self.user_foo)
+
+        orig_token = foo_client.service_catalog.catalog['token']
+        mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
+        reauthenticated_token = foo_client.tokens.authenticate(
+            token=foo_client.auth_token)
+
+        self.assertCloseEnoughForGovernmentWork(
+            timeutils.parse_isotime(orig_token['expires']),
+            timeutils.parse_isotime(reauthenticated_token.expires))
+
+    def test_user_create_update_delete(self):
+        test_username = 'new_user'
+        client = self.get_client(admin=True)
+        user = client.users.create(name=test_username,
+                                   password='password',
+                                   email='user1@test.com')
+        self.assertEqual(test_username, user.name)
+
+        user = client.users.get(user=user.id)
+        self.assertEqual(test_username, user.name)
+
+        user = client.users.update(user=user,
+                                   name=test_username,
+                                   email='user2@test.com')
+        self.assertEqual('user2@test.com', user.email)
+
+        # NOTE(termie): update_enabled doesn't return anything, probably a bug
+        client.users.update_enabled(user=user, enabled=False)
+        user = client.users.get(user.id)
+        self.assertFalse(user.enabled)
+
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._client,
+                          username=test_username,
+                          password='password')
+        client.users.update_enabled(user, True)
+
+        user = client.users.update_password(user=user, password='password2')
+
+        self._client(username=test_username,
+                     password='password2')
+
+        user = client.users.update_tenant(user=user, tenant='bar')
+        # TODO(ja): once keystonelight supports default tenant
+        #           when you login without specifying tenant, the
+        #           token should be scoped to tenant 'bar'
+
+        client.users.delete(user.id)
+        self.assertRaises(client_exceptions.NotFound, client.users.get,
+                          user.id)
+
+        # Test creating a user with a tenant (auto-add to tenant)
+        user2 = client.users.create(name=test_username,
+                                    password='password',
+                                    email='user1@test.com',
+                                    tenant_id='bar')
+        self.assertEqual(test_username, user2.name)
+
+    def test_update_default_tenant_to_existing_value(self):
+        client = self.get_client(admin=True)
+
+        user = client.users.create(
+            name=uuid.uuid4().hex,
+            password=uuid.uuid4().hex,
+            email=uuid.uuid4().hex,
+            tenant_id=self.tenant_bar['id'])
+
+        # attempting to update the tenant with the existing value should work
+        user = client.users.update_tenant(
+            user=user, tenant=self.tenant_bar['id'])
+
+    def test_user_create_no_string_password(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.users.create,
+                          name='test_user',
+                          password=12345,
+                          email=uuid.uuid4().hex)
+
+    def test_user_create_no_name(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.users.create,
+                          name="",
+                          password=uuid.uuid4().hex,
+                          email=uuid.uuid4().hex)
+
+    def test_user_create_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.users.create,
+                          name=uuid.uuid4().hex,
+                          password=uuid.uuid4().hex,
+                          email=uuid.uuid4().hex,
+                          tenant_id=uuid.uuid4().hex)
+
+    def test_user_get_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.users.get,
+                          user=uuid.uuid4().hex)
+
+    def test_user_list_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.users.list,
+                          tenant_id=uuid.uuid4().hex)
+
+    def test_user_update_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.users.update,
+                          user=uuid.uuid4().hex)
+
+    def test_user_update_tenant(self):
+        client = self.get_client(admin=True)
+        tenant_id = uuid.uuid4().hex
+        user = client.users.update(user=self.user_foo['id'],
+                                   tenant_id=tenant_id)
+        self.assertEqual(tenant_id, user.tenant_id)
+
+    def test_user_update_password_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.users.update_password,
+                          user=uuid.uuid4().hex,
+                          password=uuid.uuid4().hex)
+
+    def test_user_delete_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.users.delete,
+                          user=uuid.uuid4().hex)
+
+    def test_user_list(self):
+        client = self.get_client(admin=True)
+        users = client.users.list()
+        self.assertTrue(len(users) > 0)
+        user = users[0]
+        self.assertRaises(AttributeError, lambda: user.password)
+
+    def test_user_get(self):
+        client = self.get_client(admin=True)
+        user = client.users.get(user=self.user_foo['id'])
+        self.assertRaises(AttributeError, lambda: user.password)
+
+    def test_role_get(self):
+        client = self.get_client(admin=True)
+        role = client.roles.get(role=self.role_admin['id'])
+        self.assertEqual(self.role_admin['id'], role.id)
+
+    def test_role_crud(self):
+        test_role = 'new_role'
+        client = self.get_client(admin=True)
+        role = client.roles.create(name=test_role)
+        self.assertEqual(test_role, role.name)
+
+        role = client.roles.get(role=role.id)
+        self.assertEqual(test_role, role.name)
+
+        client.roles.delete(role=role.id)
+
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.delete,
+                          role=role.id)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.get,
+                          role=role.id)
+
+    def test_role_create_no_name(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.roles.create,
+                          name="")
+
+    def test_role_get_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.get,
+                          role=uuid.uuid4().hex)
+
+    def test_role_delete_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.delete,
+                          role=uuid.uuid4().hex)
+
+    def test_role_list_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.roles_for_user,
+                          user=uuid.uuid4().hex,
+                          tenant=uuid.uuid4().hex)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.roles_for_user,
+                          user=self.user_foo['id'],
+                          tenant=uuid.uuid4().hex)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.roles_for_user,
+                          user=uuid.uuid4().hex,
+                          tenant=self.tenant_bar['id'])
+
+    def test_role_list(self):
+        client = self.get_client(admin=True)
+        roles = client.roles.list()
+        # TODO(devcamcar): This assert should be more specific.
+        self.assertTrue(len(roles) > 0)
+
+    def test_service_crud(self):
+        client = self.get_client(admin=True)
+
+        service_name = uuid.uuid4().hex
+        service_type = uuid.uuid4().hex
+        service_desc = uuid.uuid4().hex
+
+        # create & read
+        service = client.services.create(name=service_name,
+                                         service_type=service_type,
+                                         description=service_desc)
+        self.assertEqual(service_name, service.name)
+        self.assertEqual(service_type, service.type)
+        self.assertEqual(service_desc, service.description)
+
+        service = client.services.get(id=service.id)
+        self.assertEqual(service_name, service.name)
+        self.assertEqual(service_type, service.type)
+        self.assertEqual(service_desc, service.description)
+
+        service = [x for x in client.services.list() if x.id == service.id][0]
+        self.assertEqual(service_name, service.name)
+        self.assertEqual(service_type, service.type)
+        self.assertEqual(service_desc, service.description)
+
+        # update is not supported in API v2...
+
+        # delete & read
+        client.services.delete(id=service.id)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.services.get,
+                          id=service.id)
+        services = [x for x in client.services.list() if x.id == service.id]
+        self.assertEqual(0, len(services))
+
+    def test_service_delete_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.services.delete,
+                          id=uuid.uuid4().hex)
+
+    def test_service_get_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.services.get,
+                          id=uuid.uuid4().hex)
+
+    def test_endpoint_delete_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.endpoints.delete,
+                          id=uuid.uuid4().hex)
+
+    def test_admin_requires_adminness(self):
+        # FIXME(ja): this should be Unauthorized
+        exception = client_exceptions.ClientException
+
+        two = self.get_client(self.user_two, admin=True)  # non-admin user
+
+        # USER CRUD
+        self.assertRaises(exception,
+                          two.users.list)
+        self.assertRaises(exception,
+                          two.users.get,
+                          user=self.user_two['id'])
+        self.assertRaises(exception,
+                          two.users.create,
+                          name='oops',
+                          password='password',
+                          email='oops@test.com')
+        self.assertRaises(exception,
+                          two.users.delete,
+                          user=self.user_foo['id'])
+
+        # TENANT CRUD
+        self.assertRaises(exception,
+                          two.tenants.list)
+        self.assertRaises(exception,
+                          two.tenants.get,
+                          tenant_id=self.tenant_bar['id'])
+        self.assertRaises(exception,
+                          two.tenants.create,
+                          tenant_name='oops',
+                          description="shouldn't work!",
+                          enabled=True)
+        self.assertRaises(exception,
+                          two.tenants.delete,
+                          tenant=self.tenant_baz['id'])
+
+        # ROLE CRUD
+        self.assertRaises(exception,
+                          two.roles.get,
+                          role=self.role_admin['id'])
+        self.assertRaises(exception,
+                          two.roles.list)
+        self.assertRaises(exception,
+                          two.roles.create,
+                          name='oops')
+        self.assertRaises(exception,
+                          two.roles.delete,
+                          role=self.role_admin['id'])
+
+        # TODO(ja): MEMBERSHIP CRUD
+        # TODO(ja): determine what else todo
+
+    def test_tenant_add_and_remove_user(self):
+        client = self.get_client(admin=True)
+        client.roles.add_user_role(tenant=self.tenant_bar['id'],
+                                   user=self.user_two['id'],
+                                   role=self.role_other['id'])
+        user_refs = client.tenants.list_users(tenant=self.tenant_bar['id'])
+        self.assertIn(self.user_two['id'], [x.id for x in user_refs])
+        client.roles.remove_user_role(tenant=self.tenant_bar['id'],
+                                      user=self.user_two['id'],
+                                      role=self.role_other['id'])
+        roles = client.roles.roles_for_user(user=self.user_foo['id'],
+                                            tenant=self.tenant_bar['id'])
+        self.assertNotIn(self.role_other['id'], roles)
+        user_refs = client.tenants.list_users(tenant=self.tenant_bar['id'])
+        self.assertNotIn(self.user_two['id'], [x.id for x in user_refs])
+
+    def test_user_role_add_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.add_user_role,
+                          tenant=uuid.uuid4().hex,
+                          user=self.user_foo['id'],
+                          role=self.role_member['id'])
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.add_user_role,
+                          tenant=self.tenant_baz['id'],
+                          user=self.user_foo['id'],
+                          role=uuid.uuid4().hex)
+
+    def test_user_role_add_no_user(self):
+        # add_user_role does not fail if the user does not exist.
+        client = self.get_client(admin=True)
+        client.roles.add_user_role(tenant=self.tenant_baz['id'],
+                                   user=uuid.uuid4().hex,
+                                   role=self.role_member['id'])
+
+    def test_user_role_remove_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.remove_user_role,
+                          tenant=uuid.uuid4().hex,
+                          user=self.user_foo['id'],
+                          role=self.role_member['id'])
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.remove_user_role,
+                          tenant=self.tenant_baz['id'],
+                          user=uuid.uuid4().hex,
+                          role=self.role_member['id'])
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.remove_user_role,
+                          tenant=self.tenant_baz['id'],
+                          user=self.user_foo['id'],
+                          role=uuid.uuid4().hex)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.roles.remove_user_role,
+                          tenant=self.tenant_baz['id'],
+                          user=self.user_foo['id'],
+                          role=self.role_member['id'])
+
+    def test_tenant_list_marker(self):
+        client = self.get_client()
+
+        # Add two arbitrary tenants to the user for testing purposes
+        for i in range(2):
+            tenant_id = uuid.uuid4().hex
+            tenant = {'name': 'tenant-%s' % tenant_id, 'id': tenant_id,
+                      'domain_id': DEFAULT_DOMAIN_ID}
+            self.resource_api.create_project(tenant_id, tenant)
+            self.assignment_api.add_user_to_project(tenant_id,
+                                                    self.user_foo['id'])
+
+        tenants = client.tenants.list()
+        self.assertEqual(3, len(tenants))
+
+        tenants_marker = client.tenants.list(marker=tenants[0].id)
+        self.assertEqual(2, len(tenants_marker))
+        self.assertEqual(tenants_marker[0].name, tenants[1].name)
+        self.assertEqual(tenants_marker[1].name, tenants[2].name)
+
+    def test_tenant_list_marker_not_found(self):
+        client = self.get_client()
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.tenants.list, marker=uuid.uuid4().hex)
+
+    def test_tenant_list_limit(self):
+        client = self.get_client()
+
+        # Add two arbitrary tenants to the user for testing purposes
+        for i in range(2):
+            tenant_id = uuid.uuid4().hex
+            tenant = {'name': 'tenant-%s' % tenant_id, 'id': tenant_id,
+                      'domain_id': DEFAULT_DOMAIN_ID}
+            self.resource_api.create_project(tenant_id, tenant)
+            self.assignment_api.add_user_to_project(tenant_id,
+                                                    self.user_foo['id'])
+
+        tenants = client.tenants.list()
+        self.assertEqual(3, len(tenants))
+
+        tenants_limited = client.tenants.list(limit=2)
+        self.assertEqual(2, len(tenants_limited))
+        self.assertEqual(tenants[0].name, tenants_limited[0].name)
+        self.assertEqual(tenants[1].name, tenants_limited[1].name)
+
+    def test_tenant_list_limit_bad_value(self):
+        client = self.get_client()
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.tenants.list, limit='a')
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.tenants.list, limit=-1)
+
+    def test_roles_get_by_user(self):
+        client = self.get_client(admin=True)
+        roles = client.roles.roles_for_user(user=self.user_foo['id'],
+                                            tenant=self.tenant_bar['id'])
+        self.assertTrue(len(roles) > 0)
+
+    def test_user_can_update_passwd(self):
+        client = self.get_client(self.user_two)
+
+        token_id = client.auth_token
+        new_password = uuid.uuid4().hex
+
+        # TODO(derekh): Update to use keystoneclient when available
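+        # FakeResponse is a minimal stand-in for a WSGI start_response
+        # callable: it simply records the status code and headers that the
+        # application under test passes back.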
+        class FakeResponse(object):
+            def start_fake_response(self, status, headers):
+                self.response_status = int(status.split(' ', 1)[0])
+                self.response_headers = dict(headers)
+        responseobject = FakeResponse()
+
+        req = webob.Request.blank(
+            '/v2.0/OS-KSCRUD/users/%s' % self.user_two['id'],
+            headers={'X-Auth-Token': token_id})
+        req.method = 'PATCH'
+        req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
+                    (new_password, self.user_two['password']))
+        self.public_server.application(req.environ,
+                                       responseobject.start_fake_response)
+
+        self.user_two['password'] = new_password
+        self.get_client(self.user_two)
+
+    def test_user_cannot_update_other_users_passwd(self):
+        client = self.get_client(self.user_two)
+
+        token_id = client.auth_token
+        new_password = uuid.uuid4().hex
+
+        # TODO(derekh): Update to use keystoneclient when available
+        class FakeResponse(object):
+            def start_fake_response(self, status, headers):
+                self.response_status = int(status.split(' ', 1)[0])
+                self.response_headers = dict(headers)
+        responseobject = FakeResponse()
+
+        req = webob.Request.blank(
+            '/v2.0/OS-KSCRUD/users/%s' % self.user_foo['id'],
+            headers={'X-Auth-Token': token_id})
+        req.method = 'PATCH'
+        req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
+                    (new_password, self.user_two['password']))
+        self.public_server.application(req.environ,
+                                       responseobject.start_fake_response)
+        self.assertEqual(403, responseobject.response_status)
+
+        self.user_two['password'] = new_password
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self.get_client, self.user_two)
+
+    def test_tokens_after_user_update_passwd(self):
+        client = self.get_client(self.user_two)
+
+        token_id = client.auth_token
+        new_password = uuid.uuid4().hex
+
+        # TODO(derekh): Update to use keystoneclient when available
+        class FakeResponse(object):
+            def start_fake_response(self, status, headers):
+                self.response_status = int(status.split(' ', 1)[0])
+                self.response_headers = dict(headers)
+        responseobject = FakeResponse()
+
+        req = webob.Request.blank(
+            '/v2.0/OS-KSCRUD/users/%s' % self.user_two['id'],
+            headers={'X-Auth-Token': token_id})
+        req.method = 'PATCH'
+        req.body = ('{"user":{"password":"%s","original_password":"%s"}}' %
+                    (new_password, self.user_two['password']))
+
+        rv = self.public_server.application(
+            req.environ,
+            responseobject.start_fake_response)
+        response_json = jsonutils.loads(rv.pop())
+        new_token_id = response_json['access']['token']['id']
+
+        self.assertRaises(client_exceptions.Unauthorized, client.tenants.list)
+        client.auth_token = new_token_id
+        client.tenants.list()
diff --git a/keystone-moon/keystone/tests/unit/test_v2_keystoneclient_sql.py b/keystone-moon/keystone/tests/unit/test_v2_keystoneclient_sql.py
new file mode 100644 (file)
index 0000000..0fb60fd
--- /dev/null
@@ -0,0 +1,344 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystoneclient.contrib.ec2 import utils as ec2_utils
+from keystoneclient import exceptions as client_exceptions
+
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_v2_keystoneclient
+
+
+class ClientDrivenSqlTestCase(test_v2_keystoneclient.ClientDrivenTestCase):
+    def config_files(self):
+        config_files = super(ClientDrivenSqlTestCase, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+        return config_files
+
+    def setUp(self):
+        super(ClientDrivenSqlTestCase, self).setUp()
+        self.default_client = self.get_client()
+        self.addCleanup(self.cleanup_instance('default_client'))
+
+    def test_endpoint_crud(self):
+        client = self.get_client(admin=True)
+
+        service = client.services.create(name=uuid.uuid4().hex,
+                                         service_type=uuid.uuid4().hex,
+                                         description=uuid.uuid4().hex)
+
+        endpoint_region = uuid.uuid4().hex
+        invalid_service_id = uuid.uuid4().hex
+        endpoint_publicurl = uuid.uuid4().hex
+        endpoint_internalurl = uuid.uuid4().hex
+        endpoint_adminurl = uuid.uuid4().hex
+
+        # a non-existent service ID should trigger a 400
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.endpoints.create,
+                          region=endpoint_region,
+                          service_id=invalid_service_id,
+                          publicurl=endpoint_publicurl,
+                          adminurl=endpoint_adminurl,
+                          internalurl=endpoint_internalurl)
+
+        endpoint = client.endpoints.create(region=endpoint_region,
+                                           service_id=service.id,
+                                           publicurl=endpoint_publicurl,
+                                           adminurl=endpoint_adminurl,
+                                           internalurl=endpoint_internalurl)
+
+        self.assertEqual(endpoint_region, endpoint.region)
+        self.assertEqual(service.id, endpoint.service_id)
+        self.assertEqual(endpoint_publicurl, endpoint.publicurl)
+        self.assertEqual(endpoint_internalurl, endpoint.internalurl)
+        self.assertEqual(endpoint_adminurl, endpoint.adminurl)
+
+        client.endpoints.delete(id=endpoint.id)
+        self.assertRaises(client_exceptions.NotFound, client.endpoints.delete,
+                          id=endpoint.id)
+
+    def _send_ec2_auth_request(self, credentials, client=None):
+        if not client:
+            client = self.default_client
+        url = '%s/ec2tokens' % self.default_client.auth_url
+        (resp, token) = client.request(
+            url=url, method='POST',
+            body={'credentials': credentials})
+        return resp, token
+
+    def _generate_default_user_ec2_credentials(self):
+        cred = self.default_client.ec2.create(
+            user_id=self.user_foo['id'],
+            tenant_id=self.tenant_bar['id'])
+        return self._generate_user_ec2_credentials(cred.access, cred.secret)
+
+    def _generate_user_ec2_credentials(self, access, secret):
+        signer = ec2_utils.Ec2Signer(secret)
+        credentials = {'params': {'SignatureVersion': '2'},
+                       'access': access,
+                       'verb': 'GET',
+                       'host': 'localhost',
+                       'path': '/service/cloud'}
+        signature = signer.generate(credentials)
+        return credentials, signature
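+
+    # Illustrative sketch (not part of the original tests): the helper above
+    # signs a canonical GET request with the shared secret via
+    # keystoneclient's Ec2Signer; the server recomputes the signature from
+    # the same fields and issues a token only when the signatures match:
+    #   signer = ec2_utils.Ec2Signer(secret)
+    #   assert signer.generate(credentials) == signature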
+
+    def test_ec2_auth_success(self):
+        credentials, signature = self._generate_default_user_ec2_credentials()
+        credentials['signature'] = signature
+        resp, token = self._send_ec2_auth_request(credentials)
+        self.assertEqual(200, resp.status_code)
+        self.assertIn('access', token)
+
+    def test_ec2_auth_success_trust(self):
+        # Add "other" role user_foo and create trust delegating it to user_two
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_foo['id'],
+            self.tenant_bar['id'],
+            self.role_other['id'])
+        trust_id = 'atrust123'
+        trust = {'trustor_user_id': self.user_foo['id'],
+                 'trustee_user_id': self.user_two['id'],
+                 'project_id': self.tenant_bar['id'],
+                 'impersonation': True}
+        roles = [self.role_other]
+        self.trust_api.create_trust(trust_id, trust, roles)
+
+        # Create a client for user_two, scoped to the trust
+        client = self.get_client(self.user_two)
+        ret = client.authenticate(trust_id=trust_id,
+                                  tenant_id=self.tenant_bar['id'])
+        self.assertTrue(ret)
+        self.assertTrue(client.auth_ref.trust_scoped)
+        self.assertEqual(trust_id, client.auth_ref.trust_id)
+
+        # Create an ec2 keypair using the trust client impersonating user_foo
+        cred = client.ec2.create(user_id=self.user_foo['id'],
+                                 tenant_id=self.tenant_bar['id'])
+        credentials, signature = self._generate_user_ec2_credentials(
+            cred.access, cred.secret)
+        credentials['signature'] = signature
+        resp, token = self._send_ec2_auth_request(credentials)
+        self.assertEqual(200, resp.status_code)
+        self.assertEqual(trust_id, token['access']['trust']['id'])
+        # TODO(shardy): we really want to check the roles and trustee,
+        # but because of where the stubbing happens we don't seem to
+        # hit the necessary code in controllers.py _authenticate_token,
+        # so although all is OK via a real request, it is incorrect in
+        # this test.
+
+    def test_ec2_auth_failure(self):
+        credentials, signature = self._generate_default_user_ec2_credentials()
+        credentials['signature'] = uuid.uuid4().hex
+        self.assertRaises(client_exceptions.Unauthorized,
+                          self._send_ec2_auth_request,
+                          credentials)
+
+    def test_ec2_credential_crud(self):
+        creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
+        self.assertEqual([], creds)
+
+        cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
+                                              tenant_id=self.tenant_bar['id'])
+        creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
+        self.assertEqual(creds, [cred])
+        got = self.default_client.ec2.get(user_id=self.user_foo['id'],
+                                          access=cred.access)
+        self.assertEqual(cred, got)
+
+        self.default_client.ec2.delete(user_id=self.user_foo['id'],
+                                       access=cred.access)
+        creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
+        self.assertEqual([], creds)
+
+    def test_ec2_credential_crud_non_admin(self):
+        na_client = self.get_client(self.user_two)
+        creds = na_client.ec2.list(user_id=self.user_two['id'])
+        self.assertEqual([], creds)
+
+        cred = na_client.ec2.create(user_id=self.user_two['id'],
+                                    tenant_id=self.tenant_baz['id'])
+        creds = na_client.ec2.list(user_id=self.user_two['id'])
+        self.assertEqual(creds, [cred])
+        got = na_client.ec2.get(user_id=self.user_two['id'],
+                                access=cred.access)
+        self.assertEqual(cred, got)
+
+        na_client.ec2.delete(user_id=self.user_two['id'],
+                             access=cred.access)
+        creds = na_client.ec2.list(user_id=self.user_two['id'])
+        self.assertEqual([], creds)
+
+    def test_ec2_list_credentials(self):
+        cred_1 = self.default_client.ec2.create(
+            user_id=self.user_foo['id'],
+            tenant_id=self.tenant_bar['id'])
+        cred_2 = self.default_client.ec2.create(
+            user_id=self.user_foo['id'],
+            tenant_id=self.tenant_service['id'])
+        cred_3 = self.default_client.ec2.create(
+            user_id=self.user_foo['id'],
+            tenant_id=self.tenant_mtu['id'])
+        two = self.get_client(self.user_two)
+        cred_4 = two.ec2.create(user_id=self.user_two['id'],
+                                tenant_id=self.tenant_bar['id'])
+        creds = self.default_client.ec2.list(user_id=self.user_foo['id'])
+        self.assertEqual(3, len(creds))
+        self.assertEqual(sorted([cred_1, cred_2, cred_3],
+                                key=lambda x: x.access),
+                         sorted(creds, key=lambda x: x.access))
+        self.assertNotIn(cred_4, creds)
+
+    def test_ec2_credentials_create_404(self):
+        self.assertRaises(client_exceptions.NotFound,
+                          self.default_client.ec2.create,
+                          user_id=uuid.uuid4().hex,
+                          tenant_id=self.tenant_bar['id'])
+        self.assertRaises(client_exceptions.NotFound,
+                          self.default_client.ec2.create,
+                          user_id=self.user_foo['id'],
+                          tenant_id=uuid.uuid4().hex)
+
+    def test_ec2_credentials_delete_404(self):
+        self.assertRaises(client_exceptions.NotFound,
+                          self.default_client.ec2.delete,
+                          user_id=uuid.uuid4().hex,
+                          access=uuid.uuid4().hex)
+
+    def test_ec2_credentials_get_404(self):
+        self.assertRaises(client_exceptions.NotFound,
+                          self.default_client.ec2.get,
+                          user_id=uuid.uuid4().hex,
+                          access=uuid.uuid4().hex)
+
+    def test_ec2_credentials_list_404(self):
+        self.assertRaises(client_exceptions.NotFound,
+                          self.default_client.ec2.list,
+                          user_id=uuid.uuid4().hex)
+
+    def test_ec2_credentials_list_user_forbidden(self):
+        two = self.get_client(self.user_two)
+        self.assertRaises(client_exceptions.Forbidden, two.ec2.list,
+                          user_id=self.user_foo['id'])
+
+    def test_ec2_credentials_get_user_forbidden(self):
+        cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
+                                              tenant_id=self.tenant_bar['id'])
+
+        two = self.get_client(self.user_two)
+        self.assertRaises(client_exceptions.Forbidden, two.ec2.get,
+                          user_id=self.user_foo['id'], access=cred.access)
+
+        self.default_client.ec2.delete(user_id=self.user_foo['id'],
+                                       access=cred.access)
+
+    def test_ec2_credentials_delete_user_forbidden(self):
+        cred = self.default_client.ec2.create(user_id=self.user_foo['id'],
+                                              tenant_id=self.tenant_bar['id'])
+
+        two = self.get_client(self.user_two)
+        self.assertRaises(client_exceptions.Forbidden, two.ec2.delete,
+                          user_id=self.user_foo['id'], access=cred.access)
+
+        self.default_client.ec2.delete(user_id=self.user_foo['id'],
+                                       access=cred.access)
+
+    def test_endpoint_create_nonexistent_service(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.BadRequest,
+                          client.endpoints.create,
+                          region=uuid.uuid4().hex,
+                          service_id=uuid.uuid4().hex,
+                          publicurl=uuid.uuid4().hex,
+                          adminurl=uuid.uuid4().hex,
+                          internalurl=uuid.uuid4().hex)
+
+    def test_endpoint_delete_404(self):
+        client = self.get_client(admin=True)
+        self.assertRaises(client_exceptions.NotFound,
+                          client.endpoints.delete,
+                          id=uuid.uuid4().hex)
+
+    def test_policy_crud(self):
+        # FIXME(dolph): this test was written prior to the v3 implementation of
+        #               the client and essentially refers to a non-existent
+        #               policy manager in the v2 client. this test needs to be
+        #               moved to a test suite running against the v3 api
+        self.skipTest('Written prior to v3 client; needs refactor')
+
+        client = self.get_client(admin=True)
+
+        policy_blob = uuid.uuid4().hex
+        policy_type = uuid.uuid4().hex
+        service = client.services.create(
+            name=uuid.uuid4().hex,
+            service_type=uuid.uuid4().hex,
+            description=uuid.uuid4().hex)
+        endpoint = client.endpoints.create(
+            service_id=service.id,
+            region=uuid.uuid4().hex,
+            adminurl=uuid.uuid4().hex,
+            internalurl=uuid.uuid4().hex,
+            publicurl=uuid.uuid4().hex)
+
+        # create
+        policy = client.policies.create(
+            blob=policy_blob,
+            type=policy_type,
+            endpoint=endpoint.id)
+        self.assertEqual(policy_blob, policy.policy)
+        self.assertEqual(policy_type, policy.type)
+        self.assertEqual(endpoint.id, policy.endpoint_id)
+
+        policy = client.policies.get(policy=policy.id)
+        self.assertEqual(policy_blob, policy.policy)
+        self.assertEqual(policy_type, policy.type)
+        self.assertEqual(endpoint.id, policy.endpoint_id)
+
+        endpoints = [x for x in client.endpoints.list() if x.id == endpoint.id]
+        endpoint = endpoints[0]
+        self.assertEqual(policy_blob, policy.policy)
+        self.assertEqual(policy_type, policy.type)
+        self.assertEqual(endpoint.id, policy.endpoint_id)
+
+        # update
+        policy_blob = uuid.uuid4().hex
+        policy_type = uuid.uuid4().hex
+        endpoint = client.endpoints.create(
+            service_id=service.id,
+            region=uuid.uuid4().hex,
+            adminurl=uuid.uuid4().hex,
+            internalurl=uuid.uuid4().hex,
+            publicurl=uuid.uuid4().hex)
+
+        policy = client.policies.update(
+            policy=policy.id,
+            blob=policy_blob,
+            type=policy_type,
+            endpoint=endpoint.id)
+
+        policy = client.policies.get(policy=policy.id)
+        self.assertEqual(policy_blob, policy.policy)
+        self.assertEqual(policy_type, policy.type)
+        self.assertEqual(endpoint.id, policy.endpoint_id)
+
+        # delete
+        client.policies.delete(policy=policy.id)
+        self.assertRaises(
+            client_exceptions.NotFound,
+            client.policies.get,
+            policy=policy.id)
+        policies = [x for x in client.policies.list() if x.id == policy.id]
+        self.assertEqual(0, len(policies))
diff --git a/keystone-moon/keystone/tests/unit/test_v3.py b/keystone-moon/keystone/tests/unit/test_v3.py
new file mode 100644 (file)
index 0000000..f6d6ed9
--- /dev/null
@@ -0,0 +1,1283 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+import six
+from testtools import matchers
+
+from keystone import auth
+from keystone.common import authorization
+from keystone.common import cache
+from keystone import exception
+from keystone import middleware
+from keystone.policy.backends import rules
+from keystone.tests import unit as tests
+from keystone.tests.unit import rest
+
+
+CONF = cfg.CONF
+DEFAULT_DOMAIN_ID = 'default'
+
+TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+
+
+class AuthTestMixin(object):
+    """To hold auth building helper functions."""
+    def build_auth_scope(self, project_id=None, project_name=None,
+                         project_domain_id=None, project_domain_name=None,
+                         domain_id=None, domain_name=None, trust_id=None,
+                         unscoped=None):
+        scope_data = {}
+        if unscoped:
+            scope_data['unscoped'] = {}
+        if project_id or project_name:
+            scope_data['project'] = {}
+            if project_id:
+                scope_data['project']['id'] = project_id
+            else:
+                scope_data['project']['name'] = project_name
+                if project_domain_id or project_domain_name:
+                    project_domain_json = {}
+                    if project_domain_id:
+                        project_domain_json['id'] = project_domain_id
+                    else:
+                        project_domain_json['name'] = project_domain_name
+                    scope_data['project']['domain'] = project_domain_json
+        if domain_id or domain_name:
+            scope_data['domain'] = {}
+            if domain_id:
+                scope_data['domain']['id'] = domain_id
+            else:
+                scope_data['domain']['name'] = domain_name
+        if trust_id:
+            scope_data['OS-TRUST:trust'] = {}
+            scope_data['OS-TRUST:trust']['id'] = trust_id
+        return scope_data
+
+    def build_password_auth(self, user_id=None, username=None,
+                            user_domain_id=None, user_domain_name=None,
+                            password=None):
+        password_data = {'user': {}}
+        if user_id:
+            password_data['user']['id'] = user_id
+        else:
+            password_data['user']['name'] = username
+            if user_domain_id or user_domain_name:
+                password_data['user']['domain'] = {}
+                if user_domain_id:
+                    password_data['user']['domain']['id'] = user_domain_id
+                else:
+                    password_data['user']['domain']['name'] = user_domain_name
+        password_data['user']['password'] = password
+        return password_data
+
+    def build_token_auth(self, token):
+        return {'id': token}
+
+    def build_authentication_request(self, token=None, user_id=None,
+                                     username=None, user_domain_id=None,
+                                     user_domain_name=None, password=None,
+                                     kerberos=False, **kwargs):
+        """Build auth dictionary.
+
+        It will create an auth dictionary based on all the arguments
+        that it receives.
+        """
+        auth_data = {}
+        auth_data['identity'] = {'methods': []}
+        if kerberos:
+            auth_data['identity']['methods'].append('kerberos')
+            auth_data['identity']['kerberos'] = {}
+        if token:
+            auth_data['identity']['methods'].append('token')
+            auth_data['identity']['token'] = self.build_token_auth(token)
+        if user_id or username:
+            auth_data['identity']['methods'].append('password')
+            auth_data['identity']['password'] = self.build_password_auth(
+                user_id, username, user_domain_id, user_domain_name, password)
+        if kwargs:
+            auth_data['scope'] = self.build_auth_scope(**kwargs)
+        return {'auth': auth_data}
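+
+    # Illustrative example (not from the original source): calling
+    # build_authentication_request(user_id='u1', password='s3cret',
+    # project_id='p1') yields a v3 request body shaped like:
+    #   {'auth': {'identity': {'methods': ['password'],
+    #                          'password': {'user': {'id': 'u1',
+    #                                                'password': 's3cret'}}},
+    #             'scope': {'project': {'id': 'p1'}}}}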
+
+
+class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
+                      AuthTestMixin):
+    def config_files(self):
+        config_files = super(RestfulTestCase, self).config_files()
+        config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
+        return config_files
+
+    def get_extensions(self):
+        extensions = set(['revoke'])
+        if hasattr(self, 'EXTENSION_NAME'):
+            extensions.add(self.EXTENSION_NAME)
+        return extensions
+
+    def generate_paste_config(self):
+        try:
+            return tests.generate_paste_config(self.EXTENSION_TO_ADD)
+        except AttributeError:
+            # No need to report this error here, as most tests will not
+            # have EXTENSION_TO_ADD defined.
+            return None
+
+    def remove_generated_paste_config(self):
+        try:
+            tests.remove_generated_paste_config(self.EXTENSION_TO_ADD)
+        except AttributeError:
+            pass
+
+    def setUp(self, app_conf='keystone'):
+        """Set up for v3 RESTful test cases."""
+        new_paste_file = self.generate_paste_config()
+        self.addCleanup(self.remove_generated_paste_config)
+        if new_paste_file:
+            app_conf = 'config:%s' % (new_paste_file)
+
+        super(RestfulTestCase, self).setUp(app_conf=app_conf)
+
+        self.empty_context = {'environment': {}}
+
+        # Initialize the policy engine and allow us to write to a temp
+        # file in each test to create the policies
+        rules.reset()
+
+        # drop the policy rules
+        self.addCleanup(rules.reset)
+
+    def load_backends(self):
+        # ensure the cache region instance is setup
+        cache.configure_cache_region(cache.REGION)
+
+        super(RestfulTestCase, self).load_backends()
+
+    def load_fixtures(self, fixtures):
+        self.load_sample_data()
+
+    def _populate_default_domain(self):
+        if CONF.database.connection == tests.IN_MEM_DB_CONN_STRING:
+            # NOTE(morganfainberg): If an in-memory db is being used, be sure
+            # to populate the default domain, this is typically done by
+            # a migration, but the in-mem db uses model definitions to create
+            # the schema (no migrations are run).
+            try:
+                self.resource_api.get_domain(DEFAULT_DOMAIN_ID)
+            except exception.DomainNotFound:
+                domain = {'description': (u'Owns users and tenants (i.e. '
+                                          u'projects) available on Identity '
+                                          u'API v2.'),
+                          'enabled': True,
+                          'id': DEFAULT_DOMAIN_ID,
+                          'name': u'Default'}
+                self.resource_api.create_domain(DEFAULT_DOMAIN_ID, domain)
+
+    def load_sample_data(self):
+        self._populate_default_domain()
+        self.domain_id = uuid.uuid4().hex
+        self.domain = self.new_domain_ref()
+        self.domain['id'] = self.domain_id
+        self.resource_api.create_domain(self.domain_id, self.domain)
+
+        self.project_id = uuid.uuid4().hex
+        self.project = self.new_project_ref(
+            domain_id=self.domain_id)
+        self.project['id'] = self.project_id
+        self.resource_api.create_project(self.project_id, self.project)
+
+        self.user = self.new_user_ref(domain_id=self.domain_id)
+        password = self.user['password']
+        self.user = self.identity_api.create_user(self.user)
+        self.user['password'] = password
+        self.user_id = self.user['id']
+
+        self.default_domain_project_id = uuid.uuid4().hex
+        self.default_domain_project = self.new_project_ref(
+            domain_id=DEFAULT_DOMAIN_ID)
+        self.default_domain_project['id'] = self.default_domain_project_id
+        self.resource_api.create_project(self.default_domain_project_id,
+                                         self.default_domain_project)
+
+        self.default_domain_user = self.new_user_ref(
+            domain_id=DEFAULT_DOMAIN_ID)
+        password = self.default_domain_user['password']
+        self.default_domain_user = (
+            self.identity_api.create_user(self.default_domain_user))
+        self.default_domain_user['password'] = password
+        self.default_domain_user_id = self.default_domain_user['id']
+
+        # create & grant policy.json's default role for admin_required
+        self.role_id = uuid.uuid4().hex
+        self.role = self.new_role_ref()
+        self.role['id'] = self.role_id
+        self.role['name'] = 'admin'
+        self.role_api.create_role(self.role_id, self.role)
+        self.assignment_api.add_role_to_user_and_project(
+            self.user_id, self.project_id, self.role_id)
+        self.assignment_api.add_role_to_user_and_project(
+            self.default_domain_user_id, self.default_domain_project_id,
+            self.role_id)
+        self.assignment_api.add_role_to_user_and_project(
+            self.default_domain_user_id, self.project_id,
+            self.role_id)
+
+        self.region_id = uuid.uuid4().hex
+        self.region = self.new_region_ref()
+        self.region['id'] = self.region_id
+        self.catalog_api.create_region(
+            self.region.copy())
+
+        self.service_id = uuid.uuid4().hex
+        self.service = self.new_service_ref()
+        self.service['id'] = self.service_id
+        self.catalog_api.create_service(
+            self.service_id,
+            self.service.copy())
+
+        self.endpoint_id = uuid.uuid4().hex
+        self.endpoint = self.new_endpoint_ref(service_id=self.service_id)
+        self.endpoint['id'] = self.endpoint_id
+        self.endpoint['region_id'] = self.region['id']
+        self.catalog_api.create_endpoint(
+            self.endpoint_id,
+            self.endpoint.copy())
+        # The server adds 'enabled' and defaults to True.
+        self.endpoint['enabled'] = True
+
+    def new_ref(self):
+        """Populates a ref with attributes common to all API entities."""
+        return {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'enabled': True}
+
+    def new_region_ref(self):
+        ref = self.new_ref()
+        # Region doesn't have name or enabled.
+        del ref['name']
+        del ref['enabled']
+        ref['parent_region_id'] = None
+        return ref
+
+    def new_service_ref(self):
+        ref = self.new_ref()
+        ref['type'] = uuid.uuid4().hex
+        return ref
+
+    def new_endpoint_ref(self, service_id, interface='public', **kwargs):
+        ref = self.new_ref()
+        del ref['enabled']  # enabled is optional
+        ref['interface'] = interface
+        ref['service_id'] = service_id
+        ref['url'] = 'https://' + uuid.uuid4().hex + '.com'
+        ref['region_id'] = self.region_id
+        ref.update(kwargs)
+        return ref
+
+    def new_domain_ref(self):
+        ref = self.new_ref()
+        return ref
+
+    def new_project_ref(self, domain_id, parent_id=None):
+        ref = self.new_ref()
+        ref['domain_id'] = domain_id
+        ref['parent_id'] = parent_id
+        return ref
+
+    def new_user_ref(self, domain_id, project_id=None):
+        ref = self.new_ref()
+        ref['domain_id'] = domain_id
+        ref['email'] = uuid.uuid4().hex
+        ref['password'] = uuid.uuid4().hex
+        if project_id:
+            ref['default_project_id'] = project_id
+        return ref
+
+    def new_group_ref(self, domain_id):
+        ref = self.new_ref()
+        ref['domain_id'] = domain_id
+        return ref
+
+    def new_credential_ref(self, user_id, project_id=None, cred_type=None):
+        ref = dict()
+        ref['id'] = uuid.uuid4().hex
+        ref['user_id'] = user_id
+        if cred_type == 'ec2':
+            ref['type'] = 'ec2'
+            ref['blob'] = {'blah': 'test'}
+        else:
+            ref['type'] = 'cert'
+            ref['blob'] = uuid.uuid4().hex
+        if project_id:
+            ref['project_id'] = project_id
+        return ref
+
+    def new_role_ref(self):
+        ref = self.new_ref()
+        # Roles don't have a description or the enabled flag
+        del ref['description']
+        del ref['enabled']
+        return ref
+
+    def new_policy_ref(self):
+        ref = self.new_ref()
+        ref['blob'] = uuid.uuid4().hex
+        ref['type'] = uuid.uuid4().hex
+        return ref
+
+    def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
+                      impersonation=None, expires=None, role_ids=None,
+                      role_names=None, remaining_uses=None,
+                      allow_redelegation=False):
+        ref = dict()
+        ref['id'] = uuid.uuid4().hex
+        ref['trustor_user_id'] = trustor_user_id
+        ref['trustee_user_id'] = trustee_user_id
+        ref['impersonation'] = impersonation or False
+        ref['project_id'] = project_id
+        ref['remaining_uses'] = remaining_uses
+        ref['allow_redelegation'] = allow_redelegation
+
+        if isinstance(expires, six.string_types):
+            ref['expires_at'] = expires
+        elif isinstance(expires, dict):
+            ref['expires_at'] = timeutils.strtime(
+                timeutils.utcnow() + datetime.timedelta(**expires),
+                fmt=TIME_FORMAT)
+        elif expires is None:
+            pass
+        else:
+            raise NotImplementedError('Unexpected value for "expires"')
+
+        role_ids = role_ids or []
+        role_names = role_names or []
+        if role_ids or role_names:
+            ref['roles'] = []
+            for role_id in role_ids:
+                ref['roles'].append({'id': role_id})
+            for role_name in role_names:
+                ref['roles'].append({'name': role_name})
+
+        return ref
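+
+    # Illustrative example (not part of the original code): a trust ref
+    # expiring in one day with a single role; 'trustee' is a hypothetical
+    # second user created via new_user_ref.
+    #
+    #     ref = self.new_trust_ref(trustor_user_id=self.user_id,
+    #                              trustee_user_id=trustee['id'],
+    #                              project_id=self.project_id,
+    #                              expires=dict(days=1),
+    #                              role_ids=[self.role_id])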
+
+    def create_new_default_project_for_user(self, user_id, domain_id,
+                                            enable_project=True):
+        ref = self.new_project_ref(domain_id=domain_id)
+        ref['enabled'] = enable_project
+        r = self.post('/projects', body={'project': ref})
+        project = self.assertValidProjectResponse(r, ref)
+        # set the user's preferred project
+        body = {'user': {'default_project_id': project['id']}}
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': user_id},
+            body=body)
+        self.assertValidUserResponse(r)
+
+        return project
+
+    def get_scoped_token(self):
+        """Convenience method so that we can test authenticated requests."""
+        r = self.admin_request(
+            method='POST',
+            path='/v3/auth/tokens',
+            body={
+                'auth': {
+                    'identity': {
+                        'methods': ['password'],
+                        'password': {
+                            'user': {
+                                'name': self.user['name'],
+                                'password': self.user['password'],
+                                'domain': {
+                                    'id': self.user['domain_id']
+                                }
+                            }
+                        }
+                    },
+                    'scope': {
+                        'project': {
+                            'id': self.project['id'],
+                        }
+                    }
+                }
+            })
+        return r.headers.get('X-Subject-Token')
+
+    def get_requested_token(self, auth):
+        """Request the specific token we want."""
+
+        r = self.v3_authenticate_token(auth)
+        return r.headers.get('X-Subject-Token')
+
+    def v3_authenticate_token(self, auth, expected_status=201):
+        return self.admin_request(method='POST',
+                                  path='/v3/auth/tokens',
+                                  body=auth,
+                                  expected_status=expected_status)
+
+    def v3_noauth_request(self, path, **kwargs):
+        # request does not require auth token header
+        path = '/v3' + path
+        return self.admin_request(path=path, **kwargs)
+
+    def v3_request(self, path, **kwargs):
+        # check to see if caller requires token for the API call.
+        if kwargs.pop('noauth', None):
+            return self.v3_noauth_request(path, **kwargs)
+
+        # Check if the caller has passed in auth details for
+        # use in requesting the token
+        auth_arg = kwargs.pop('auth', None)
+        if auth_arg:
+            token = self.get_requested_token(auth_arg)
+        else:
+            token = kwargs.pop('token', None)
+            if not token:
+                token = self.get_scoped_token()
+        path = '/v3' + path
+
+        return self.admin_request(path=path, token=token, **kwargs)
+
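+    # Illustrative note (not part of the original code): the HTTP helpers
+    # below all route through v3_request, so a test can make authenticated
+    # calls either with the default scoped token or with explicit auth, e.g.
+    #
+    #     self.get('/projects/%s' % self.project_id)
+    #     self.get('/projects/%s' % self.project_id,
+    #              auth=self.build_authentication_request(
+    #                  user_id=self.user_id,
+    #                  password=self.user['password'],
+    #                  project_id=self.project_id))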
+    def get(self, path, **kwargs):
+        r = self.v3_request(method='GET', path=path, **kwargs)
+        if 'expected_status' not in kwargs:
+            self.assertResponseStatus(r, 200)
+        return r
+
+    def head(self, path, **kwargs):
+        r = self.v3_request(method='HEAD', path=path, **kwargs)
+        if 'expected_status' not in kwargs:
+            self.assertResponseStatus(r, 204)
+        self.assertEqual('', r.body)
+        return r
+
+    def post(self, path, **kwargs):
+        r = self.v3_request(method='POST', path=path, **kwargs)
+        if 'expected_status' not in kwargs:
+            self.assertResponseStatus(r, 201)
+        return r
+
+    def put(self, path, **kwargs):
+        r = self.v3_request(method='PUT', path=path, **kwargs)
+        if 'expected_status' not in kwargs:
+            self.assertResponseStatus(r, 204)
+        return r
+
+    def patch(self, path, **kwargs):
+        r = self.v3_request(method='PATCH', path=path, **kwargs)
+        if 'expected_status' not in kwargs:
+            self.assertResponseStatus(r, 200)
+        return r
+
+    def delete(self, path, **kwargs):
+        r = self.v3_request(method='DELETE', path=path, **kwargs)
+        if 'expected_status' not in kwargs:
+            self.assertResponseStatus(r, 204)
+        return r
+
+    def assertValidErrorResponse(self, r):
+        resp = r.result
+        self.assertIsNotNone(resp.get('error'))
+        self.assertIsNotNone(resp['error'].get('code'))
+        self.assertIsNotNone(resp['error'].get('title'))
+        self.assertIsNotNone(resp['error'].get('message'))
+        self.assertEqual(int(resp['error']['code']), r.status_code)
+
+    def assertValidListLinks(self, links, resource_url=None):
+        self.assertIsNotNone(links)
+        self.assertIsNotNone(links.get('self'))
+        self.assertThat(links['self'], matchers.StartsWith('http://localhost'))
+
+        if resource_url:
+            self.assertThat(links['self'], matchers.EndsWith(resource_url))
+
+        self.assertIn('next', links)
+        if links['next'] is not None:
+            self.assertThat(links['next'],
+                            matchers.StartsWith('http://localhost'))
+
+        self.assertIn('previous', links)
+        if links['previous'] is not None:
+            self.assertThat(links['previous'],
+                            matchers.StartsWith('http://localhost'))
+
+    def assertValidListResponse(self, resp, key, entity_validator, ref=None,
+                                expected_length=None, keys_to_check=None,
+                                resource_url=None):
+        """Make assertions common to all API list responses.
+
+        If a reference is provided, its ID will be searched for in the
+        response and asserted to be equal.
+
+        """
+        entities = resp.result.get(key)
+        self.assertIsNotNone(entities)
+
+        if expected_length is not None:
+            self.assertEqual(expected_length, len(entities))
+        elif ref is not None:
+            # we're at least expecting the ref
+            self.assertNotEmpty(entities)
+
+        # collections should have relational links
+        self.assertValidListLinks(resp.result.get('links'),
+                                  resource_url=resource_url)
+
+        for entity in entities:
+            self.assertIsNotNone(entity)
+            self.assertValidEntity(entity, keys_to_check=keys_to_check)
+            entity_validator(entity)
+        if ref:
+            entity = [x for x in entities if x['id'] == ref['id']][0]
+            self.assertValidEntity(entity, ref=ref,
+                                   keys_to_check=keys_to_check)
+            entity_validator(entity, ref)
+        return entities
+
+    def assertValidResponse(self, resp, key, entity_validator, *args,
+                            **kwargs):
+        """Make assertions common to all API responses."""
+        entity = resp.result.get(key)
+        self.assertIsNotNone(entity)
+        keys = kwargs.pop('keys_to_check', None)
+        self.assertValidEntity(entity, keys_to_check=keys, *args, **kwargs)
+        entity_validator(entity, *args, **kwargs)
+        return entity
+
+    def assertValidEntity(self, entity, ref=None, keys_to_check=None):
+        """Make assertions common to all API entities.
+
+        If a reference is provided, the entity will also be compared against
+        the reference.
+        """
+        if keys_to_check is not None:
+            keys = keys_to_check
+        else:
+            keys = ['name', 'description', 'enabled']
+
+        for k in ['id'] + keys:
+            msg = '%s unexpectedly None in %s' % (k, entity)
+            self.assertIsNotNone(entity.get(k), msg)
+
+        self.assertIsNotNone(entity.get('links'))
+        self.assertIsNotNone(entity['links'].get('self'))
+        self.assertThat(entity['links']['self'],
+                        matchers.StartsWith('http://localhost'))
+        self.assertIn(entity['id'], entity['links']['self'])
+
+        if ref:
+            for k in keys:
+                msg = '%s not equal: %s != %s' % (k, ref[k], entity[k])
+                self.assertEqual(ref[k], entity[k])
+
+        return entity
+
+    def assertDictContainsSubset(self, expected, actual):
+        """"Asserts if dictionary actual is a superset of expected.
+
+        Tests whether the key/value pairs in dictionary actual are a superset
+        of those in expected.
+
+        """
+        for k, v in six.iteritems(expected):
+            self.assertIn(k, actual)
+            if isinstance(v, dict):
+                self.assertDictContainsSubset(v, actual[k])
+            else:
+                self.assertEqual(v, actual[k])
+
+    # auth validation
+
+    def assertValidISO8601ExtendedFormatDatetime(self, dt):
+        try:
+            return timeutils.parse_strtime(dt, fmt=TIME_FORMAT)
+        except Exception:
+            msg = '%s is not a valid ISO 8601 extended format date time.' % dt
+            raise AssertionError(msg)
+
+    def assertValidTokenResponse(self, r, user=None):
+        self.assertTrue(r.headers.get('X-Subject-Token'))
+        token = r.result['token']
+
+        self.assertIsNotNone(token.get('expires_at'))
+        expires_at = self.assertValidISO8601ExtendedFormatDatetime(
+            token['expires_at'])
+        self.assertIsNotNone(token.get('issued_at'))
+        issued_at = self.assertValidISO8601ExtendedFormatDatetime(
+            token['issued_at'])
+        self.assertTrue(issued_at < expires_at)
+
+        self.assertIn('user', token)
+        self.assertIn('id', token['user'])
+        self.assertIn('name', token['user'])
+        self.assertIn('domain', token['user'])
+        self.assertIn('id', token['user']['domain'])
+
+        if user is not None:
+            self.assertEqual(user['id'], token['user']['id'])
+            self.assertEqual(user['name'], token['user']['name'])
+            self.assertEqual(user['domain_id'], token['user']['domain']['id'])
+
+        return token
+
+    def assertValidUnscopedTokenResponse(self, r, *args, **kwargs):
+        token = self.assertValidTokenResponse(r, *args, **kwargs)
+
+        self.assertNotIn('roles', token)
+        self.assertNotIn('catalog', token)
+        self.assertNotIn('project', token)
+        self.assertNotIn('domain', token)
+
+        return token
+
+    def assertValidScopedTokenResponse(self, r, *args, **kwargs):
+        require_catalog = kwargs.pop('require_catalog', True)
+        endpoint_filter = kwargs.pop('endpoint_filter', False)
+        ep_filter_assoc = kwargs.pop('ep_filter_assoc', 0)
+        token = self.assertValidTokenResponse(r, *args, **kwargs)
+
+        if require_catalog:
+            endpoint_num = 0
+            self.assertIn('catalog', token)
+
+            if isinstance(token['catalog'], list):
+                # only test JSON
+                for service in token['catalog']:
+                    for endpoint in service['endpoints']:
+                        self.assertNotIn('enabled', endpoint)
+                        self.assertNotIn('legacy_endpoint_id', endpoint)
+                        self.assertNotIn('service_id', endpoint)
+                        endpoint_num += 1
+
+            # sub test for the OS-EP-FILTER extension enabled
+            if endpoint_filter:
+                self.assertEqual(ep_filter_assoc, endpoint_num)
+        else:
+            self.assertNotIn('catalog', token)
+
+        self.assertIn('roles', token)
+        self.assertTrue(token['roles'])
+        for role in token['roles']:
+            self.assertIn('id', role)
+            self.assertIn('name', role)
+
+        return token
+
+    def assertValidProjectScopedTokenResponse(self, r, *args, **kwargs):
+        token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
+
+        self.assertIn('project', token)
+        self.assertIn('id', token['project'])
+        self.assertIn('name', token['project'])
+        self.assertIn('domain', token['project'])
+        self.assertIn('id', token['project']['domain'])
+        self.assertIn('name', token['project']['domain'])
+
+        self.assertEqual(self.role_id, token['roles'][0]['id'])
+
+        return token
+
+    def assertValidProjectTrustScopedTokenResponse(self, r, *args, **kwargs):
+        token = self.assertValidProjectScopedTokenResponse(r, *args, **kwargs)
+
+        trust = token.get('OS-TRUST:trust')
+        self.assertIsNotNone(trust)
+        self.assertIsNotNone(trust.get('id'))
+        self.assertIsInstance(trust.get('impersonation'), bool)
+        self.assertIsNotNone(trust.get('trustor_user'))
+        self.assertIsNotNone(trust.get('trustee_user'))
+        self.assertIsNotNone(trust['trustor_user'].get('id'))
+        self.assertIsNotNone(trust['trustee_user'].get('id'))
+
+    def assertValidDomainScopedTokenResponse(self, r, *args, **kwargs):
+        token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
+
+        self.assertIn('domain', token)
+        self.assertIn('id', token['domain'])
+        self.assertIn('name', token['domain'])
+
+        return token
+
+    def assertEqualTokens(self, a, b):
+        """Assert that two tokens are equal.
+
+        Compare two tokens except for their ids. This also truncates
+        the time in the comparison.
+        """
+        def normalize(token):
+            del token['token']['expires_at']
+            del token['token']['issued_at']
+            return token
+
+        a_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
+            a['token']['expires_at'])
+        b_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
+            b['token']['expires_at'])
+        self.assertCloseEnoughForGovernmentWork(a_expires_at, b_expires_at)
+
+        a_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
+            a['token']['issued_at'])
+        b_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
+            b['token']['issued_at'])
+        self.assertCloseEnoughForGovernmentWork(a_issued_at, b_issued_at)
+
+        return self.assertDictEqual(normalize(a), normalize(b))
+
+    # catalog validation
+
+    def assertValidCatalogResponse(self, resp, *args, **kwargs):
+        self.assertEqual(set(['catalog', 'links']), set(resp.json.keys()))
+        self.assertValidCatalog(resp.json['catalog'])
+        self.assertIn('links', resp.json)
+        self.assertIsInstance(resp.json['links'], dict)
+        self.assertEqual(['self'], list(resp.json['links'].keys()))
+        self.assertEqual(
+            'http://localhost/v3/auth/catalog',
+            resp.json['links']['self'])
+
+    def assertValidCatalog(self, entity):
+        self.assertIsInstance(entity, list)
+        self.assertTrue(len(entity) > 0)
+        for service in entity:
+            self.assertIsNotNone(service.get('id'))
+            self.assertIsNotNone(service.get('name'))
+            self.assertIsNotNone(service.get('type'))
+            self.assertNotIn('enabled', service)
+            self.assertTrue(len(service['endpoints']) > 0)
+            for endpoint in service['endpoints']:
+                self.assertIsNotNone(endpoint.get('id'))
+                self.assertIsNotNone(endpoint.get('interface'))
+                self.assertIsNotNone(endpoint.get('url'))
+                self.assertNotIn('enabled', endpoint)
+                self.assertNotIn('legacy_endpoint_id', endpoint)
+                self.assertNotIn('service_id', endpoint)
+
+    # region validation
+
+    def assertValidRegionListResponse(self, resp, *args, **kwargs):
+        # NOTE(jaypipes): I have to pass in a blank keys_to_check parameter
+        #                 below otherwise the base assertValidEntity method
+        #                 tries to find a "name" and an "enabled" key in the
+        #                 returned ref dicts. The issue is, I don't understand
+        #                 how the service and endpoint entity assertions below
+        #                 actually work (they don't raise assertions), since
+        #                 AFAICT, the service and endpoint tables don't have
+        #                 a "name" column either... :(
+        return self.assertValidListResponse(
+            resp,
+            'regions',
+            self.assertValidRegion,
+            keys_to_check=[],
+            *args,
+            **kwargs)
+
+    def assertValidRegionResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'region',
+            self.assertValidRegion,
+            keys_to_check=[],
+            *args,
+            **kwargs)
+
+    def assertValidRegion(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('description'))
+        if ref:
+            self.assertEqual(ref['description'], entity['description'])
+        return entity
+
+    # service validation
+
+    def assertValidServiceListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'services',
+            self.assertValidService,
+            *args,
+            **kwargs)
+
+    def assertValidServiceResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'service',
+            self.assertValidService,
+            *args,
+            **kwargs)
+
+    def assertValidService(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('type'))
+        self.assertIsInstance(entity.get('enabled'), bool)
+        if ref:
+            self.assertEqual(ref['type'], entity['type'])
+        return entity
+
+    # endpoint validation
+
+    def assertValidEndpointListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'endpoints',
+            self.assertValidEndpoint,
+            *args,
+            **kwargs)
+
+    def assertValidEndpointResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'endpoint',
+            self.assertValidEndpoint,
+            *args,
+            **kwargs)
+
+    def assertValidEndpoint(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('interface'))
+        self.assertIsNotNone(entity.get('service_id'))
+        self.assertIsInstance(entity['enabled'], bool)
+
+        # this is intended to be an unexposed implementation detail
+        self.assertNotIn('legacy_endpoint_id', entity)
+
+        if ref:
+            self.assertEqual(ref['interface'], entity['interface'])
+            self.assertEqual(ref['service_id'], entity['service_id'])
+            if ref.get('region') is not None:
+                self.assertEqual(ref['region_id'], entity.get('region_id'))
+
+        return entity
+
+    # domain validation
+
+    def assertValidDomainListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'domains',
+            self.assertValidDomain,
+            *args,
+            **kwargs)
+
+    def assertValidDomainResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'domain',
+            self.assertValidDomain,
+            *args,
+            **kwargs)
+
+    def assertValidDomain(self, entity, ref=None):
+        # nothing domain-specific to check beyond the common entity
+        # assertions
+        return entity
+
+    # project validation
+
+    def assertValidProjectListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'projects',
+            self.assertValidProject,
+            *args,
+            **kwargs)
+
+    def assertValidProjectResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'project',
+            self.assertValidProject,
+            *args,
+            **kwargs)
+
+    def assertValidProject(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('domain_id'))
+        if ref:
+            self.assertEqual(ref['domain_id'], entity['domain_id'])
+        return entity
+
+    # user validation
+
+    def assertValidUserListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'users',
+            self.assertValidUser,
+            *args,
+            **kwargs)
+
+    def assertValidUserResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'user',
+            self.assertValidUser,
+            *args,
+            **kwargs)
+
+    def assertValidUser(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('domain_id'))
+        self.assertIsNotNone(entity.get('email'))
+        self.assertIsNone(entity.get('password'))
+        self.assertNotIn('tenantId', entity)
+        if ref:
+            self.assertEqual(ref['domain_id'], entity['domain_id'])
+            self.assertEqual(ref['email'], entity['email'])
+            if 'default_project_id' in ref:
+                self.assertIsNotNone(ref['default_project_id'])
+                self.assertEqual(ref['default_project_id'],
+                                 entity['default_project_id'])
+        return entity
+
+    # group validation
+
+    def assertValidGroupListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'groups',
+            self.assertValidGroup,
+            *args,
+            **kwargs)
+
+    def assertValidGroupResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'group',
+            self.assertValidGroup,
+            *args,
+            **kwargs)
+
+    def assertValidGroup(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('name'))
+        if ref:
+            self.assertEqual(ref['name'], entity['name'])
+        return entity
+
+    # credential validation
+
+    def assertValidCredentialListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'credentials',
+            self.assertValidCredential,
+            keys_to_check=['blob', 'user_id', 'type'],
+            *args,
+            **kwargs)
+
+    def assertValidCredentialResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'credential',
+            self.assertValidCredential,
+            keys_to_check=['blob', 'user_id', 'type'],
+            *args,
+            **kwargs)
+
+    def assertValidCredential(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('user_id'))
+        self.assertIsNotNone(entity.get('blob'))
+        self.assertIsNotNone(entity.get('type'))
+        if ref:
+            self.assertEqual(ref['user_id'], entity['user_id'])
+            self.assertEqual(ref['blob'], entity['blob'])
+            self.assertEqual(ref['type'], entity['type'])
+            self.assertEqual(ref.get('project_id'), entity.get('project_id'))
+        return entity
+
+    # role validation
+
+    def assertValidRoleListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'roles',
+            self.assertValidRole,
+            keys_to_check=['name'],
+            *args,
+            **kwargs)
+
+    def assertValidRoleResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'role',
+            self.assertValidRole,
+            keys_to_check=['name'],
+            *args,
+            **kwargs)
+
+    def assertValidRole(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('name'))
+        if ref:
+            self.assertEqual(ref['name'], entity['name'])
+        return entity
+
+    # role assignment validation
+
+    def assertValidRoleAssignmentListResponse(self, resp, expected_length=None,
+                                              resource_url=None):
+        entities = resp.result.get('role_assignments')
+
+        if expected_length:
+            self.assertEqual(expected_length, len(entities))
+
+        # Collections should have relational links
+        self.assertValidListLinks(resp.result.get('links'),
+                                  resource_url=resource_url)
+
+        for entity in entities:
+            self.assertIsNotNone(entity)
+            self.assertValidRoleAssignment(entity)
+        return entities
+
+    def assertValidRoleAssignment(self, entity, ref=None):
+        # A role should be present
+        self.assertIsNotNone(entity.get('role'))
+        self.assertIsNotNone(entity['role'].get('id'))
+
+        # Only one of user or group should be present
+        if entity.get('user'):
+            self.assertIsNone(entity.get('group'))
+            self.assertIsNotNone(entity['user'].get('id'))
+        else:
+            self.assertIsNotNone(entity.get('group'))
+            self.assertIsNotNone(entity['group'].get('id'))
+
+        # A scope should be present and have only one of domain or project
+        self.assertIsNotNone(entity.get('scope'))
+
+        if entity['scope'].get('project'):
+            self.assertIsNone(entity['scope'].get('domain'))
+            self.assertIsNotNone(entity['scope']['project'].get('id'))
+        else:
+            self.assertIsNotNone(entity['scope'].get('domain'))
+            self.assertIsNotNone(entity['scope']['domain'].get('id'))
+
+        # An assignment link should be present
+        self.assertIsNotNone(entity.get('links'))
+        self.assertIsNotNone(entity['links'].get('assignment'))
+
+        if ref:
+            links = ref.pop('links')
+            try:
+                self.assertDictContainsSubset(ref, entity)
+                self.assertIn(links['assignment'],
+                              entity['links']['assignment'])
+            finally:
+                if links:
+                    ref['links'] = links
+
+    def assertRoleAssignmentInListResponse(self, resp, ref, expected=1):
+        found_count = 0
+        for entity in resp.result.get('role_assignments'):
+            try:
+                self.assertValidRoleAssignment(entity, ref=ref)
+            except Exception:
+                # It doesn't match, so let's go onto the next one
+                pass
+            else:
+                found_count += 1
+        self.assertEqual(expected, found_count)
+
+    def assertRoleAssignmentNotInListResponse(self, resp, ref):
+        self.assertRoleAssignmentInListResponse(resp, ref=ref, expected=0)
+
+    # policy validation
+
+    def assertValidPolicyListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'policies',
+            self.assertValidPolicy,
+            *args,
+            **kwargs)
+
+    def assertValidPolicyResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'policy',
+            self.assertValidPolicy,
+            *args,
+            **kwargs)
+
+    def assertValidPolicy(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('blob'))
+        self.assertIsNotNone(entity.get('type'))
+        if ref:
+            self.assertEqual(ref['blob'], entity['blob'])
+            self.assertEqual(ref['type'], entity['type'])
+        return entity
+
+    # trust validation
+
+    def assertValidTrustListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'trusts',
+            self.assertValidTrustSummary,
+            keys_to_check=['trustor_user_id',
+                           'trustee_user_id',
+                           'impersonation'],
+            *args,
+            **kwargs)
+
+    def assertValidTrustResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'trust',
+            self.assertValidTrust,
+            keys_to_check=['trustor_user_id',
+                           'trustee_user_id',
+                           'impersonation'],
+            *args,
+            **kwargs)
+
+    def assertValidTrustSummary(self, entity, ref=None):
+        return self.assertValidTrust(entity, ref, summary=True)
+
+    def assertValidTrust(self, entity, ref=None, summary=False):
+        self.assertIsNotNone(entity.get('trustor_user_id'))
+        self.assertIsNotNone(entity.get('trustee_user_id'))
+        self.assertIsNotNone(entity.get('impersonation'))
+
+        self.assertIn('expires_at', entity)
+        if entity['expires_at'] is not None:
+            self.assertValidISO8601ExtendedFormatDatetime(entity['expires_at'])
+
+        if summary:
+            # Trust list contains no roles, but getting a specific
+            # trust by ID provides the detailed response containing roles
+            self.assertNotIn('roles', entity)
+            self.assertIn('project_id', entity)
+        else:
+            for role in entity['roles']:
+                self.assertIsNotNone(role)
+                self.assertValidEntity(role, keys_to_check=['name'])
+                self.assertValidRole(role)
+
+            self.assertValidListLinks(entity.get('roles_links'))
+
+            # roles and project_id must be either both present or both
+            # absent (disallow exactly one of the two)
+            has_roles = bool(entity.get('roles'))
+            has_project = bool(entity.get('project_id'))
+            self.assertFalse(has_roles ^ has_project)
+
+        if ref:
+            self.assertEqual(ref['trustor_user_id'], entity['trustor_user_id'])
+            self.assertEqual(ref['trustee_user_id'], entity['trustee_user_id'])
+            self.assertEqual(ref['project_id'], entity['project_id'])
+            if entity.get('expires_at') or ref.get('expires_at'):
+                entity_exp = self.assertValidISO8601ExtendedFormatDatetime(
+                    entity['expires_at'])
+                ref_exp = self.assertValidISO8601ExtendedFormatDatetime(
+                    ref['expires_at'])
+                self.assertCloseEnoughForGovernmentWork(entity_exp, ref_exp)
+            else:
+                self.assertEqual(ref.get('expires_at'),
+                                 entity.get('expires_at'))
+
+        return entity
+
+    def build_external_auth_request(self, remote_user,
+                                    remote_domain=None, auth_data=None,
+                                    kerberos=False):
+        context = {'environment': {'REMOTE_USER': remote_user,
+                                   'AUTH_TYPE': 'Negotiate'}}
+        if remote_domain:
+            context['environment']['REMOTE_DOMAIN'] = remote_domain
+        if not auth_data:
+            auth_data = self.build_authentication_request(
+                kerberos=kerberos)['auth']
+        no_context = None
+        auth_info = auth.controllers.AuthInfo.create(no_context, auth_data)
+        auth_context = {'extras': {}, 'method_names': []}
+        return context, auth_info, auth_context
+
+
+class VersionTestCase(RestfulTestCase):
+    def test_get_version(self):
+        pass
+
+
+# NOTE(gyee): test AuthContextMiddleware here instead of test_middleware.py
+# because we need the token
+class AuthContextMiddlewareTestCase(RestfulTestCase):
+    def _mock_request_object(self, token_id):
+
+        class fake_req(object):
+            headers = {middleware.AUTH_TOKEN_HEADER: token_id}
+            environ = {}
+
+        return fake_req()
+
+    def test_auth_context_build_by_middleware(self):
+        # test to make sure AuthContextMiddleware successfully builds the
+        # auth context from the incoming auth token
+        admin_token = self.get_scoped_token()
+        req = self._mock_request_object(admin_token)
+        application = None
+        middleware.AuthContextMiddleware(application).process_request(req)
+        self.assertEqual(
+            self.user['id'],
+            req.environ.get(authorization.AUTH_CONTEXT_ENV)['user_id'])
+
+    def test_auth_context_override(self):
+        overridden_context = 'OVERRIDDEN_CONTEXT'
+        # this token should not be used
+        token = uuid.uuid4().hex
+        req = self._mock_request_object(token)
+        req.environ[authorization.AUTH_CONTEXT_ENV] = overridden_context
+        application = None
+        middleware.AuthContextMiddleware(application).process_request(req)
+        # make sure the overridden context takes precedence
+        self.assertEqual(overridden_context,
+                         req.environ.get(authorization.AUTH_CONTEXT_ENV))
+
+    def test_admin_token_auth_context(self):
+        # test to make sure AuthContextMiddleware does not attempt to build
+        # auth context if the incoming auth token is the special admin token
+        req = self._mock_request_object(CONF.admin_token)
+        application = None
+        middleware.AuthContextMiddleware(application).process_request(req)
+        self.assertDictEqual(req.environ.get(authorization.AUTH_CONTEXT_ENV),
+                             {})
+
+
+class JsonHomeTestMixin(object):
+    """JSON Home test
+
+    Mix this class in to provide a test for the JSON-Home response for an
+    extension.
+
+    The class using this mixin must set JSON_HOME_DATA to a dict mapping
+    relationship URLs (rels) to the JSON-Home data for each relationship.
+    The rels and associated data must be in the response.
+
+    """
+    def test_get_json_home(self):
+        resp = self.get('/', convert=False,
+                        headers={'Accept': 'application/json-home'})
+        self.assertThat(resp.headers['Content-Type'],
+                        matchers.Equals('application/json-home'))
+        resp_data = jsonutils.loads(resp.body)
+
+        # Check that the example relationships are present.
+        for rel in self.JSON_HOME_DATA:
+            self.assertThat(resp_data['resources'][rel],
+                            matchers.Equals(self.JSON_HOME_DATA[rel]))
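+
+    # Illustrative usage (not part of the original code); the class name and
+    # rel URL below are hypothetical:
+    #
+    #     class ExampleExtensionTestCase(RestfulTestCase, JsonHomeTestMixin):
+    #         JSON_HOME_DATA = {
+    #             'http://docs.openstack.org/api/openstack-identity/3/ext/'
+    #             'EXAMPLE/1.0/rel/example': {
+    #                 'href': '/OS-EXAMPLE/example',
+    #             },
+    #         }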
diff --git a/keystone-moon/keystone/tests/unit/test_v3_assignment.py b/keystone-moon/keystone/tests/unit/test_v3_assignment.py
new file mode 100644 (file)
index 0000000..add14bf
--- /dev/null
@@ -0,0 +1,2943 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+import six
+import uuid
+
+from oslo_config import cfg
+
+from keystone.common import controller
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+
+
+def _build_role_assignment_query_url(effective=False, **filters):
+    """Build and return a role assignment query url with provided params.
+
+    Available filters are: domain_id, project_id, user_id, group_id, role_id
+    and inherited_to_projects.
+
+    """
+
+    query_params = '?effective' if effective else ''
+
+    for k, v in six.iteritems(filters):
+        query_params += '?' if not query_params else '&'
+
+        if k == 'inherited_to_projects':
+            query_params += 'scope.OS-INHERIT:inherited_to=projects'
+        else:
+            if k in ['domain_id', 'project_id']:
+                query_params += 'scope.'
+            elif k not in ['user_id', 'group_id', 'role_id']:
+                raise ValueError('Invalid key \'%s\' in provided filters.' % k)
+
+            query_params += '%s=%s' % (k.replace('_', '.'), v)
+
+    return '/role_assignments%s' % query_params
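+
+
+# Illustrative example (not part of the original code): for hypothetical
+# IDs 'u1' and 'p1',
+#
+#     _build_role_assignment_query_url(effective=True,
+#                                      user_id='u1', project_id='p1')
+#
+# would return (filter order may vary with dict iteration order):
+#
+#     '/role_assignments?effective&user.id=u1&scope.project.id=p1'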
+
+
+def _build_role_assignment_link(**attribs):
+    """Build and return a role assignment link with provided attributes.
+
+    Provided attributes are expected to contain: domain_id or project_id,
+    user_id or group_id, role_id and, optionally, inherited_to_projects.
+
+    """
+
+    if attribs.get('domain_id'):
+        link = '/domains/' + attribs['domain_id']
+    else:
+        link = '/projects/' + attribs['project_id']
+
+    if attribs.get('user_id'):
+        link += '/users/' + attribs['user_id']
+    else:
+        link += '/groups/' + attribs['group_id']
+
+    link += '/roles/' + attribs['role_id']
+
+    if attribs.get('inherited_to_projects'):
+        return '/OS-INHERIT%s/inherited_to_projects' % link
+
+    return link
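+
+
+# Illustrative example (not part of the original code): with hypothetical
+# IDs,
+#
+#     _build_role_assignment_link(project_id='p1', user_id='u1',
+#                                 role_id='r1')
+#
+# returns '/projects/p1/users/u1/roles/r1'; adding
+# inherited_to_projects=True would instead return
+# '/OS-INHERIT/projects/p1/users/u1/roles/r1/inherited_to_projects'.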
+
+
+def _build_role_assignment_entity(link=None, **attribs):
+    """Build and return a role assignment entity with provided attributes.
+
+    Provided attributes are expected to contain: domain_id or project_id,
+    user_id or group_id, role_id and, optionally, inherited_to_projects.
+
+    """
+
+    entity = {'links': {'assignment': (
+        link or _build_role_assignment_link(**attribs))}}
+
+    if attribs.get('domain_id'):
+        entity['scope'] = {'domain': {'id': attribs['domain_id']}}
+    else:
+        entity['scope'] = {'project': {'id': attribs['project_id']}}
+
+    if attribs.get('user_id'):
+        entity['user'] = {'id': attribs['user_id']}
+
+        if attribs.get('group_id'):
+            entity['links']['membership'] = ('/groups/%s/users/%s' %
+                                             (attribs['group_id'],
+                                              attribs['user_id']))
+    else:
+        entity['group'] = {'id': attribs['group_id']}
+
+    entity['role'] = {'id': attribs['role_id']}
+
+    if attribs.get('inherited_to_projects'):
+        entity['scope']['OS-INHERIT:inherited_to'] = 'projects'
+
+    return entity
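+
+
+# Illustrative example (not part of the original code): a group assignment
+# on a domain, with hypothetical IDs:
+#
+#     _build_role_assignment_entity(domain_id='d1', group_id='g1',
+#                                   role_id='r1')
+#
+# returns
+#
+#     {'links': {'assignment': '/domains/d1/groups/g1/roles/r1'},
+#      'scope': {'domain': {'id': 'd1'}},
+#      'group': {'id': 'g1'},
+#      'role': {'id': 'r1'}}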
+
+
+class AssignmentTestCase(test_v3.RestfulTestCase):
+    """Test domains, projects, roles and role assignments."""
+
+    def setUp(self):
+        super(AssignmentTestCase, self).setUp()
+
+        self.group = self.new_group_ref(
+            domain_id=self.domain_id)
+        self.group = self.identity_api.create_group(self.group)
+        self.group_id = self.group['id']
+
+        self.credential_id = uuid.uuid4().hex
+        self.credential = self.new_credential_ref(
+            user_id=self.user['id'],
+            project_id=self.project_id)
+        self.credential['id'] = self.credential_id
+        self.credential_api.create_credential(
+            self.credential_id,
+            self.credential)
+
+    # Domain CRUD tests
+
+    def test_create_domain(self):
+        """Call ``POST /domains``."""
+        ref = self.new_domain_ref()
+        r = self.post(
+            '/domains',
+            body={'domain': ref})
+        return self.assertValidDomainResponse(r, ref)
+
+    def test_create_domain_case_sensitivity(self):
+        """Call `POST /domains`` twice with upper() and lower() cased name."""
+        ref = self.new_domain_ref()
+
+        # ensure the name is lowercase
+        ref['name'] = ref['name'].lower()
+        r = self.post(
+            '/domains',
+            body={'domain': ref})
+        self.assertValidDomainResponse(r, ref)
+
+        # ensure the name is uppercase
+        ref['name'] = ref['name'].upper()
+        r = self.post(
+            '/domains',
+            body={'domain': ref})
+        self.assertValidDomainResponse(r, ref)
+
+    def test_create_domain_400(self):
+        """Call ``POST /domains``."""
+        self.post('/domains', body={'domain': {}}, expected_status=400)
+
+    def test_list_domains(self):
+        """Call ``GET /domains``."""
+        resource_url = '/domains'
+        r = self.get(resource_url)
+        self.assertValidDomainListResponse(r, ref=self.domain,
+                                           resource_url=resource_url)
+
+    def test_get_domain(self):
+        """Call ``GET /domains/{domain_id}``."""
+        r = self.get('/domains/%(domain_id)s' % {
+            'domain_id': self.domain_id})
+        self.assertValidDomainResponse(r, self.domain)
+
+    def test_update_domain(self):
+        """Call ``PATCH /domains/{domain_id}``."""
+        ref = self.new_domain_ref()
+        del ref['id']
+        r = self.patch('/domains/%(domain_id)s' % {
+            'domain_id': self.domain_id},
+            body={'domain': ref})
+        self.assertValidDomainResponse(r, ref)
+
+    def test_disable_domain(self):
+        """Call ``PATCH /domains/{domain_id}`` (set enabled=False)."""
+        # Create a 2nd set of entities in a 2nd domain
+        self.domain2 = self.new_domain_ref()
+        self.resource_api.create_domain(self.domain2['id'], self.domain2)
+
+        self.project2 = self.new_project_ref(
+            domain_id=self.domain2['id'])
+        self.resource_api.create_project(self.project2['id'], self.project2)
+
+        self.user2 = self.new_user_ref(
+            domain_id=self.domain2['id'],
+            project_id=self.project2['id'])
+        password = self.user2['password']
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+
+        self.assignment_api.add_user_to_project(self.project2['id'],
+                                                self.user2['id'])
+
+        # First check a user in that domain can authenticate via
+        # both v2 and v3
+        body = {
+            'auth': {
+                'passwordCredentials': {
+                    'userId': self.user2['id'],
+                    'password': self.user2['password']
+                },
+                'tenantId': self.project2['id']
+            }
+        }
+        self.admin_request(path='/v2.0/tokens', method='POST', body=body)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user2['id'],
+            password=self.user2['password'],
+            project_id=self.project2['id'])
+        self.v3_authenticate_token(auth_data)
+
+        # Now disable the domain
+        self.domain2['enabled'] = False
+        r = self.patch('/domains/%(domain_id)s' % {
+            'domain_id': self.domain2['id']},
+            body={'domain': {'enabled': False}})
+        self.assertValidDomainResponse(r, self.domain2)
+
+        # Make sure the user can no longer authenticate via
+        # either API
+        body = {
+            'auth': {
+                'passwordCredentials': {
+                    'userId': self.user2['id'],
+                    'password': self.user2['password']
+                },
+                'tenantId': self.project2['id']
+            }
+        }
+        self.admin_request(
+            path='/v2.0/tokens', method='POST', body=body, expected_status=401)
+
+        # Try looking up in v3 by name and id
+        auth_data = self.build_authentication_request(
+            user_id=self.user2['id'],
+            password=self.user2['password'],
+            project_id=self.project2['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+        auth_data = self.build_authentication_request(
+            username=self.user2['name'],
+            user_domain_id=self.domain2['id'],
+            password=self.user2['password'],
+            project_id=self.project2['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_delete_enabled_domain_fails(self):
+        """Call ``DELETE /domains/{domain_id}`` (when domain enabled)."""
+
+        # Try deleting an enabled domain, which should fail
+        self.delete('/domains/%(domain_id)s' % {
+            'domain_id': self.domain['id']},
+            expected_status=exception.ForbiddenAction.code)
+
+    def test_delete_domain(self):
+        """Call ``DELETE /domains/{domain_id}``.
+
+        The sample data set up already has a user, group, project
+        and credential that is part of self.domain. Since the user
+        we will authenticate with is in this domain, we create
+        another set of entities in a second domain. Deleting this
+        second domain should delete all these new entities. In addition,
+        all the entities in the regular self.domain should be unaffected
+        by the delete.
+
+        Test Plan:
+
+        - Create domain2 and a 2nd set of entities
+        - Disable domain2
+        - Delete domain2
+        - Check entities in domain2 have been deleted
+        - Check entities in self.domain are unaffected
+
+        """
+
+        # Create a 2nd set of entities in a 2nd domain
+        self.domain2 = self.new_domain_ref()
+        self.resource_api.create_domain(self.domain2['id'], self.domain2)
+
+        self.project2 = self.new_project_ref(
+            domain_id=self.domain2['id'])
+        self.resource_api.create_project(self.project2['id'], self.project2)
+
+        self.user2 = self.new_user_ref(
+            domain_id=self.domain2['id'],
+            project_id=self.project2['id'])
+        self.user2 = self.identity_api.create_user(self.user2)
+
+        self.group2 = self.new_group_ref(
+            domain_id=self.domain2['id'])
+        self.group2 = self.identity_api.create_group(self.group2)
+
+        self.credential2 = self.new_credential_ref(
+            user_id=self.user2['id'],
+            project_id=self.project2['id'])
+        self.credential_api.create_credential(
+            self.credential2['id'],
+            self.credential2)
+
+        # Now disable the new domain and delete it
+        self.domain2['enabled'] = False
+        r = self.patch('/domains/%(domain_id)s' % {
+            'domain_id': self.domain2['id']},
+            body={'domain': {'enabled': False}})
+        self.assertValidDomainResponse(r, self.domain2)
+        self.delete('/domains/%(domain_id)s' % {
+            'domain_id': self.domain2['id']})
+
+        # Check all the domain2 relevant entities are gone
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          self.domain2['id'])
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          self.project2['id'])
+        self.assertRaises(exception.GroupNotFound,
+                          self.identity_api.get_group,
+                          self.group2['id'])
+        self.assertRaises(exception.UserNotFound,
+                          self.identity_api.get_user,
+                          self.user2['id'])
+        self.assertRaises(exception.CredentialNotFound,
+                          self.credential_api.get_credential,
+                          self.credential2['id'])
+
+        # ...and that all self.domain entities are still here
+        r = self.resource_api.get_domain(self.domain['id'])
+        self.assertDictEqual(r, self.domain)
+        r = self.resource_api.get_project(self.project['id'])
+        self.assertDictEqual(r, self.project)
+        r = self.identity_api.get_group(self.group['id'])
+        self.assertDictEqual(r, self.group)
+        r = self.identity_api.get_user(self.user['id'])
+        self.user.pop('password')
+        self.assertDictEqual(r, self.user)
+        r = self.credential_api.get_credential(self.credential['id'])
+        self.assertDictEqual(r, self.credential)
+
+    def test_delete_default_domain_fails(self):
+        # Attempting to delete the default domain results in 403 Forbidden.
+
+        # Need to disable it first.
+        self.patch('/domains/%(domain_id)s' % {
+            'domain_id': CONF.identity.default_domain_id},
+            body={'domain': {'enabled': False}})
+
+        self.delete('/domains/%(domain_id)s' % {
+            'domain_id': CONF.identity.default_domain_id},
+            expected_status=exception.ForbiddenAction.code)
+
+    def test_delete_new_default_domain_fails(self):
+        # If the default domain ID is changed, deleting the new default
+        # domain results in a 403 Forbidden.
+
+        # Create a new domain that's not the default
+        new_domain = self.new_domain_ref()
+        new_domain_id = new_domain['id']
+        self.resource_api.create_domain(new_domain_id, new_domain)
+
+        # Disable the new domain so it can be deleted later.
+        self.patch('/domains/%(domain_id)s' % {
+            'domain_id': new_domain_id},
+            body={'domain': {'enabled': False}})
+
+        # Change the default domain
+        self.config_fixture.config(group='identity',
+                                   default_domain_id=new_domain_id)
+
+        # Attempt to delete the new domain
+
+        self.delete('/domains/%(domain_id)s' % {'domain_id': new_domain_id},
+                    expected_status=exception.ForbiddenAction.code)
+
+    def test_delete_old_default_domain(self):
+        # If the default domain ID is changed, deleting the old default
+        # domain works.
+
+        # Create a new domain that's not the default
+        new_domain = self.new_domain_ref()
+        new_domain_id = new_domain['id']
+        self.resource_api.create_domain(new_domain_id, new_domain)
+
+        old_default_domain_id = CONF.identity.default_domain_id
+
+        # Disable the default domain so we can delete it later.
+        self.patch('/domains/%(domain_id)s' % {
+            'domain_id': old_default_domain_id},
+            body={'domain': {'enabled': False}})
+
+        # Change the default domain
+        self.config_fixture.config(group='identity',
+                                   default_domain_id=new_domain_id)
+
+        # Delete the old default domain
+
+        self.delete(
+            '/domains/%(domain_id)s' % {'domain_id': old_default_domain_id})
+
+    def test_token_revoked_once_domain_disabled(self):
+        """Test token from a disabled domain has been invalidated.
+
+        Test that a token that was valid for an enabled domain
+        becomes invalid once that domain is disabled.
+
+        """
+
+        self.domain = self.new_domain_ref()
+        self.resource_api.create_domain(self.domain['id'], self.domain)
+
+        self.user2 = self.new_user_ref(domain_id=self.domain['id'])
+        password = self.user2['password']
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+
+        # build a request body
+        auth_body = self.build_authentication_request(
+            user_id=self.user2['id'],
+            password=self.user2['password'])
+
+        # sends a request for the user's token
+        token_resp = self.post('/auth/tokens', body=auth_body)
+
+        subject_token = token_resp.headers.get('x-subject-token')
+
+        # validates the returned token; it should be valid.
+        self.head('/auth/tokens',
+                  headers={'x-subject-token': subject_token},
+                  expected_status=200)
+
+        # now disable the domain
+        self.domain['enabled'] = False
+        url = "/domains/%(domain_id)s" % {'domain_id': self.domain['id']}
+        self.patch(url,
+                   body={'domain': {'enabled': False}},
+                   expected_status=200)
+
+        # validates the same token again; it should now be 'not found'
+        # since the domain has been disabled.
+        self.head('/auth/tokens',
+                  headers={'x-subject-token': subject_token},
+                  expected_status=404)
+
+    def test_delete_domain_hierarchy(self):
+        """Call ``DELETE /domains/{domain_id}``."""
+        domain = self.new_domain_ref()
+        self.resource_api.create_domain(domain['id'], domain)
+
+        root_project = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(root_project['id'], root_project)
+
+        leaf_project = self.new_project_ref(
+            domain_id=domain['id'],
+            parent_id=root_project['id'])
+        self.resource_api.create_project(leaf_project['id'], leaf_project)
+
+        # Need to disable it first.
+        self.patch('/domains/%(domain_id)s' % {
+            'domain_id': domain['id']},
+            body={'domain': {'enabled': False}})
+
+        self.delete(
+            '/domains/%(domain_id)s' % {
+                'domain_id': domain['id']})
+
+        self.assertRaises(exception.DomainNotFound,
+                          self.resource_api.get_domain,
+                          domain['id'])
+
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          root_project['id'])
+
+        self.assertRaises(exception.ProjectNotFound,
+                          self.resource_api.get_project,
+                          leaf_project['id'])
+
+    def test_forbid_operations_on_federated_domain(self):
+        """Make sure one cannot operate on federated domain.
+
+        This includes operations like create, update, delete
+        on domain identified by id and name where difference variations of
+        id 'Federated' are used.
+
+        """
+        def create_domains():
+            for variation in ('Federated', 'FEDERATED',
+                              'federated', 'fEderated'):
+                domain = self.new_domain_ref()
+                domain['id'] = variation
+                yield domain
+
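+        # The federated domain is reserved: create and update are expected to
+        # be rejected with an AssertionError before reaching the driver, and
+        # delete reports DomainNotFound.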
+        for domain in create_domains():
+            self.assertRaises(
+                AssertionError, self.assignment_api.create_domain,
+                domain['id'], domain)
+            self.assertRaises(
+                AssertionError, self.assignment_api.update_domain,
+                domain['id'], domain)
+            self.assertRaises(
+                exception.DomainNotFound, self.assignment_api.delete_domain,
+                domain['id'])
+
+            # swap 'name' with 'id' and try again, expecting the request to
+            # gracefully fail
+            domain['id'], domain['name'] = domain['name'], domain['id']
+            self.assertRaises(
+                AssertionError, self.assignment_api.create_domain,
+                domain['id'], domain)
+            self.assertRaises(
+                AssertionError, self.assignment_api.update_domain,
+                domain['id'], domain)
+            self.assertRaises(
+                exception.DomainNotFound, self.assignment_api.delete_domain,
+                domain['id'])
+
+    def test_forbid_operations_on_defined_federated_domain(self):
+        """Make sure one cannot operate on a user-defined federated domain.
+
+        This includes operations like create, update, delete.
+
+        """
+
+        non_default_name = 'beta_federated_domain'
+        self.config_fixture.config(group='federation',
+                                   federated_domain_name=non_default_name)
+        domain = self.new_domain_ref()
+        domain['name'] = non_default_name
+        self.assertRaises(AssertionError,
+                          self.assignment_api.create_domain,
+                          domain['id'], domain)
+        self.assertRaises(exception.DomainNotFound,
+                          self.assignment_api.delete_domain,
+                          domain['id'])
+        self.assertRaises(AssertionError,
+                          self.assignment_api.update_domain,
+                          domain['id'], domain)
+
+    def test_set_federated_domain_when_config_empty(self):
+        """Make sure we are operable even if config value is not properly
+        set.
+
+        This includes operations like create, update, delete.
+
+        """
+        federated_name = 'Federated'
+        self.config_fixture.config(group='federation',
+                                   federated_domain_name='')
+        domain = self.new_domain_ref()
+        domain['id'] = federated_name
+        self.assertRaises(AssertionError,
+                          self.assignment_api.create_domain,
+                          domain['id'], domain)
+        self.assertRaises(exception.DomainNotFound,
+                          self.assignment_api.delete_domain,
+                          domain['id'])
+        self.assertRaises(AssertionError,
+                          self.assignment_api.update_domain,
+                          domain['id'], domain)
+
+        # swap id with name
+        domain['id'], domain['name'] = domain['name'], domain['id']
+        self.assertRaises(AssertionError,
+                          self.assignment_api.create_domain,
+                          domain['id'], domain)
+        self.assertRaises(exception.DomainNotFound,
+                          self.assignment_api.delete_domain,
+                          domain['id'])
+        self.assertRaises(AssertionError,
+                          self.assignment_api.update_domain,
+                          domain['id'], domain)
+
+    # Project CRUD tests
+
+    def test_list_projects(self):
+        """Call ``GET /projects``."""
+        resource_url = '/projects'
+        r = self.get(resource_url)
+        self.assertValidProjectListResponse(r, ref=self.project,
+                                            resource_url=resource_url)
+
+    def test_create_project(self):
+        """Call ``POST /projects``."""
+        ref = self.new_project_ref(domain_id=self.domain_id)
+        r = self.post(
+            '/projects',
+            body={'project': ref})
+        self.assertValidProjectResponse(r, ref)
+
+    def test_create_project_400(self):
+        """Call ``POST /projects``."""
+        self.post('/projects', body={'project': {}}, expected_status=400)
+
+    def _create_projects_hierarchy(self, hierarchy_size=1):
+        """Creates a project hierarchy with specified size.
+
+        :param hierarchy_size: the desired hierarchy size; default is 1,
+                               i.e. a project with one child.
+
+        :returns: a list of the projects in the created hierarchy.
+
+        """
+        resp = self.get(
+            '/projects/%(project_id)s' % {
+                'project_id': self.project_id})
+
+        projects = [resp.result]
+
+        for i in range(hierarchy_size):
+            new_ref = self.new_project_ref(
+                domain_id=self.domain_id,
+                parent_id=projects[i]['project']['id'])
+            resp = self.post('/projects',
+                             body={'project': new_ref})
+            self.assertValidProjectResponse(resp, new_ref)
+
+            projects.append(resp.result)
+
+        return projects
+
+    def test_create_hierarchical_project(self):
+        """Call ``POST /projects``."""
+        self._create_projects_hierarchy()
+
+    def test_get_project(self):
+        """Call ``GET /projects/{project_id}``."""
+        r = self.get(
+            '/projects/%(project_id)s' % {
+                'project_id': self.project_id})
+        self.assertValidProjectResponse(r, self.project)
+
+    def test_get_project_with_parents_as_ids(self):
+        """Call ``GET /projects/{project_id}?parents_as_ids``."""
+        projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+        # Query for projects[2] parents_as_ids
+        r = self.get(
+            '/projects/%(project_id)s?parents_as_ids' % {
+                'project_id': projects[2]['project']['id']})
+
+        self.assertValidProjectResponse(r, projects[2]['project'])
+        parents_as_ids = r.result['project']['parents']
+
+        # Assert parents_as_ids is a structured dictionary correctly
+        # representing the hierarchy. The request was made using projects[2]
+        # id, hence its parents should be projects[1] and projects[0]. It
+        # should have the following structure:
+        # {
+        #   projects[1]: {
+        #       projects[0]: None
+        #   }
+        # }
+        expected_dict = {
+            projects[1]['project']['id']: {
+                projects[0]['project']['id']: None
+            }
+        }
+        self.assertDictEqual(expected_dict, parents_as_ids)
+
+        # Query for projects[0] parents_as_ids
+        r = self.get(
+            '/projects/%(project_id)s?parents_as_ids' % {
+                'project_id': projects[0]['project']['id']})
+
+        self.assertValidProjectResponse(r, projects[0]['project'])
+        parents_as_ids = r.result['project']['parents']
+
+        # projects[0] has no parents, parents_as_ids must be None
+        self.assertIsNone(parents_as_ids)
+
+    def test_get_project_with_parents_as_list(self):
+        """Call ``GET /projects/{project_id}?parents_as_list``."""
+        projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+        r = self.get(
+            '/projects/%(project_id)s?parents_as_list' % {
+                'project_id': projects[1]['project']['id']})
+
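+        # Unlike parents_as_ids, parents_as_list returns the parents as a
+        # list of full resource bodies (each wrapped in a 'project' key), so
+        # membership can be checked against the stored creation responses.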
+        self.assertEqual(1, len(r.result['project']['parents']))
+        self.assertValidProjectResponse(r, projects[1]['project'])
+        self.assertIn(projects[0], r.result['project']['parents'])
+        self.assertNotIn(projects[2], r.result['project']['parents'])
+
+    def test_get_project_with_parents_as_list_and_parents_as_ids(self):
+        """Call ``GET /projects/{project_id}?parents_as_list&parents_as_ids``.
+
+        """
+        projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+        self.get(
+            '/projects/%(project_id)s?parents_as_list&parents_as_ids' % {
+                'project_id': projects[1]['project']['id']},
+            expected_status=400)
+
+    def test_get_project_with_subtree_as_ids(self):
+        """Call ``GET /projects/{project_id}?subtree_as_ids``.
+
+        This test creates a more complex hierarchy to test if the structured
+        dictionary returned by using the ``subtree_as_ids`` query param
+        correctly represents the hierarchy.
+
+        The hierarchy contains 5 projects with the following structure::
+
+                                  +--A--+
+                                  |     |
+                               +--B--+  C
+                               |     |
+                               D     E
+
+
+        """
+        projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+        # Add another child to projects[0] - it will be projects[3]
+        new_ref = self.new_project_ref(
+            domain_id=self.domain_id,
+            parent_id=projects[0]['project']['id'])
+        resp = self.post('/projects',
+                         body={'project': new_ref})
+        self.assertValidProjectResponse(resp, new_ref)
+        projects.append(resp.result)
+
+        # Add another child to projects[1] - it will be projects[4]
+        new_ref = self.new_project_ref(
+            domain_id=self.domain_id,
+            parent_id=projects[1]['project']['id'])
+        resp = self.post('/projects',
+                         body={'project': new_ref})
+        self.assertValidProjectResponse(resp, new_ref)
+        projects.append(resp.result)
+
+        # Query for projects[0] subtree_as_ids
+        r = self.get(
+            '/projects/%(project_id)s?subtree_as_ids' % {
+                'project_id': projects[0]['project']['id']})
+        self.assertValidProjectResponse(r, projects[0]['project'])
+        subtree_as_ids = r.result['project']['subtree']
+
+        # The subtree hierarchy from projects[0] should have the following
+        # structure:
+        # {
+        #   projects[1]: {
+        #       projects[2]: None,
+        #       projects[4]: None
+        #   },
+        #   projects[3]: None
+        # }
+        expected_dict = {
+            projects[1]['project']['id']: {
+                projects[2]['project']['id']: None,
+                projects[4]['project']['id']: None
+            },
+            projects[3]['project']['id']: None
+        }
+        self.assertDictEqual(expected_dict, subtree_as_ids)
+
+        # Now query for projects[1] subtree_as_ids
+        r = self.get(
+            '/projects/%(project_id)s?subtree_as_ids' % {
+                'project_id': projects[1]['project']['id']})
+        self.assertValidProjectResponse(r, projects[1]['project'])
+        subtree_as_ids = r.result['project']['subtree']
+
+        # The subtree hierarchy from projects[1] should have the following
+        # structure:
+        # {
+        #   projects[2]: None,
+        #   projects[4]: None
+        # }
+        expected_dict = {
+            projects[2]['project']['id']: None,
+            projects[4]['project']['id']: None
+        }
+        self.assertDictEqual(expected_dict, subtree_as_ids)
+
+        # Now query for projects[3] subtree_as_ids
+        r = self.get(
+            '/projects/%(project_id)s?subtree_as_ids' % {
+                'project_id': projects[3]['project']['id']})
+        self.assertValidProjectResponse(r, projects[3]['project'])
+        subtree_as_ids = r.result['project']['subtree']
+
+        # projects[3] has no subtree, subtree_as_ids must be None
+        self.assertIsNone(subtree_as_ids)
+
+    def test_get_project_with_subtree_as_list(self):
+        """Call ``GET /projects/{project_id}?subtree_as_list``."""
+        projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+        r = self.get(
+            '/projects/%(project_id)s?subtree_as_list' % {
+                'project_id': projects[1]['project']['id']})
+
+        self.assertEqual(1, len(r.result['project']['subtree']))
+        self.assertValidProjectResponse(r, projects[1]['project'])
+        self.assertNotIn(projects[0], r.result['project']['subtree'])
+        self.assertIn(projects[2], r.result['project']['subtree'])
+
+    def test_get_project_with_subtree_as_list_and_subtree_as_ids(self):
+        """Call ``GET /projects/{project_id}?subtree_as_list&subtree_as_ids``.
+
+        """
+        projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+        self.get(
+            '/projects/%(project_id)s?subtree_as_list&subtree_as_ids' % {
+                'project_id': projects[1]['project']['id']},
+            expected_status=400)
+
+    def test_update_project(self):
+        """Call ``PATCH /projects/{project_id}``."""
+        ref = self.new_project_ref(domain_id=self.domain_id)
+        del ref['id']
+        r = self.patch(
+            '/projects/%(project_id)s' % {
+                'project_id': self.project_id},
+            body={'project': ref})
+        self.assertValidProjectResponse(r, ref)
+
+    def test_update_project_domain_id(self):
+        """Call ``PATCH /projects/{project_id}`` with domain_id."""
+        project = self.new_project_ref(domain_id=self.domain['id'])
+        self.resource_api.create_project(project['id'], project)
+        project['domain_id'] = CONF.identity.default_domain_id
+        r = self.patch('/projects/%(project_id)s' % {
+            'project_id': project['id']},
+            body={'project': project},
+            expected_status=exception.ValidationError.code)
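+        # domain_id is immutable by default, so the attempted move fails
+        # validation; once the option is relaxed, a patch carrying the
+        # original domain_id succeeds.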
+        self.config_fixture.config(domain_id_immutable=False)
+        project['domain_id'] = self.domain['id']
+        r = self.patch('/projects/%(project_id)s' % {
+            'project_id': project['id']},
+            body={'project': project})
+        self.assertValidProjectResponse(r, project)
+
+    def test_update_project_parent_id(self):
+        """Call ``PATCH /projects/{project_id}``."""
+        projects = self._create_projects_hierarchy()
+        leaf_project = projects[1]['project']
+        leaf_project['parent_id'] = None
+        self.patch(
+            '/projects/%(project_id)s' % {
+                'project_id': leaf_project['id']},
+            body={'project': leaf_project},
+            expected_status=403)
+
+    def test_disable_leaf_project(self):
+        """Call ``PATCH /projects/{project_id}``."""
+        projects = self._create_projects_hierarchy()
+        leaf_project = projects[1]['project']
+        leaf_project['enabled'] = False
+        r = self.patch(
+            '/projects/%(project_id)s' % {
+                'project_id': leaf_project['id']},
+            body={'project': leaf_project})
+        self.assertEqual(
+            leaf_project['enabled'], r.result['project']['enabled'])
+
+    def test_disable_not_leaf_project(self):
+        """Call ``PATCH /projects/{project_id}``."""
+        projects = self._create_projects_hierarchy()
+        root_project = projects[0]['project']
+        root_project['enabled'] = False
+        self.patch(
+            '/projects/%(project_id)s' % {
+                'project_id': root_project['id']},
+            body={'project': root_project},
+            expected_status=403)
+
+    def test_delete_project(self):
+        """Call ``DELETE /projects/{project_id}``
+
+        As well as making sure the delete succeeds, we ensure
+        that any credentials that reference this projects are
+        also deleted, while other credentials are unaffected.
+
+        """
+        # First check the credential for this project is present
+        r = self.credential_api.get_credential(self.credential['id'])
+        self.assertDictEqual(r, self.credential)
+        # Create a second credential with a different project
+        self.project2 = self.new_project_ref(
+            domain_id=self.domain['id'])
+        self.resource_api.create_project(self.project2['id'], self.project2)
+        self.credential2 = self.new_credential_ref(
+            user_id=self.user['id'],
+            project_id=self.project2['id'])
+        self.credential_api.create_credential(
+            self.credential2['id'],
+            self.credential2)
+
+        # Now delete the project
+        self.delete(
+            '/projects/%(project_id)s' % {
+                'project_id': self.project_id})
+
+        # Deleting the project should have deleted any credentials
+        # that reference this project
+        self.assertRaises(exception.CredentialNotFound,
+                          self.credential_api.get_credential,
+                          credential_id=self.credential['id'])
+        # But the credential for project2 is unaffected
+        r = self.credential_api.get_credential(self.credential2['id'])
+        self.assertDictEqual(r, self.credential2)
+
+    def test_delete_not_leaf_project(self):
+        """Call ``DELETE /projects/{project_id}``."""
+        self._create_projects_hierarchy()
+        self.delete(
+            '/projects/%(project_id)s' % {
+                'project_id': self.project_id},
+            expected_status=403)
+
+    # Role CRUD tests
+
+    def test_create_role(self):
+        """Call ``POST /roles``."""
+        ref = self.new_role_ref()
+        r = self.post(
+            '/roles',
+            body={'role': ref})
+        return self.assertValidRoleResponse(r, ref)
+
+    def test_create_role_400(self):
+        """Call ``POST /roles``."""
+        self.post('/roles', body={'role': {}}, expected_status=400)
+
+    def test_list_roles(self):
+        """Call ``GET /roles``."""
+        resource_url = '/roles'
+        r = self.get(resource_url)
+        self.assertValidRoleListResponse(r, ref=self.role,
+                                         resource_url=resource_url)
+
+    def test_get_role(self):
+        """Call ``GET /roles/{role_id}``."""
+        r = self.get('/roles/%(role_id)s' % {
+            'role_id': self.role_id})
+        self.assertValidRoleResponse(r, self.role)
+
+    def test_update_role(self):
+        """Call ``PATCH /roles/{role_id}``."""
+        ref = self.new_role_ref()
+        del ref['id']
+        r = self.patch('/roles/%(role_id)s' % {
+            'role_id': self.role_id},
+            body={'role': ref})
+        self.assertValidRoleResponse(r, ref)
+
+    def test_delete_role(self):
+        """Call ``DELETE /roles/{role_id}``."""
+        self.delete('/roles/%(role_id)s' % {
+            'role_id': self.role_id})
+
+    # Role Grants tests
+
+    def test_crud_user_project_role_grants(self):
+        collection_url = (
+            '/projects/%(project_id)s/users/%(user_id)s/roles' % {
+                'project_id': self.project['id'],
+                'user_id': self.user['id']})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id}
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=self.role,
+                                         resource_url=collection_url)
+
+        # FIXME(gyee): this test is no longer valid as the user
+        # has no role in the project, so a scoped token can't be obtained.
+        # self.delete(member_url)
+        # r = self.get(collection_url)
+        # self.assertValidRoleListResponse(r, expected_length=0)
+        # self.assertIn(collection_url, r.result['links']['self'])
+
+    def test_crud_user_project_role_grants_no_user(self):
+        """Grant role on a project to a user that doesn't exist, 404 result.
+
+        When grant a role on a project to a user that doesn't exist, the server
+        returns 404 Not Found for the user.
+
+        """
+
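+        # A random uuid stands in for a user id that does not exist in the
+        # backend.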
+        user_id = uuid.uuid4().hex
+
+        collection_url = (
+            '/projects/%(project_id)s/users/%(user_id)s/roles' % {
+                'project_id': self.project['id'], 'user_id': user_id})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id}
+
+        self.put(member_url, expected_status=404)
+
+    def test_crud_user_domain_role_grants(self):
+        collection_url = (
+            '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': self.domain_id,
+                'user_id': self.user['id']})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id}
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=self.role,
+                                         resource_url=collection_url)
+
+        self.delete(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, expected_length=0,
+                                         resource_url=collection_url)
+
+    def test_crud_user_domain_role_grants_no_user(self):
+        """Grant role on a domain to a user that doesn't exist, 404 result.
+
+        When grant a role on a domain to a user that doesn't exist, the server
+        returns 404 Not Found for the user.
+
+        """
+
+        user_id = uuid.uuid4().hex
+
+        collection_url = (
+            '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': self.domain_id, 'user_id': user_id})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id}
+
+        self.put(member_url, expected_status=404)
+
+    def test_crud_group_project_role_grants(self):
+        collection_url = (
+            '/projects/%(project_id)s/groups/%(group_id)s/roles' % {
+                'project_id': self.project_id,
+                'group_id': self.group_id})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id}
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=self.role,
+                                         resource_url=collection_url)
+
+        self.delete(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, expected_length=0,
+                                         resource_url=collection_url)
+
+    def test_crud_group_project_role_grants_no_group(self):
+        """Grant role on a project to a group that doesn't exist, 404 result.
+
+        When grant a role on a project to a group that doesn't exist, the
+        server returns 404 Not Found for the group.
+
+        """
+
+        group_id = uuid.uuid4().hex
+
+        collection_url = (
+            '/projects/%(project_id)s/groups/%(group_id)s/roles' % {
+                'project_id': self.project_id,
+                'group_id': group_id})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id}
+
+        self.put(member_url, expected_status=404)
+
+    def test_crud_group_domain_role_grants(self):
+        collection_url = (
+            '/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
+                'domain_id': self.domain_id,
+                'group_id': self.group_id})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id}
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=self.role,
+                                         resource_url=collection_url)
+
+        self.delete(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, expected_length=0,
+                                         resource_url=collection_url)
+
+    def test_crud_group_domain_role_grants_no_group(self):
+        """Grant role on a domain to a group that doesn't exist, 404 result.
+
+        When grant a role on a domain to a group that doesn't exist, the server
+        returns 404 Not Found for the group.
+
+        """
+
+        group_id = uuid.uuid4().hex
+
+        collection_url = (
+            '/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
+                'domain_id': self.domain_id,
+                'group_id': group_id})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id}
+
+        self.put(member_url, expected_status=404)
+
+    def _create_new_user_and_assign_role_on_project(self):
+        """Create a new user and assign user a role on a project."""
+        # Create a new user
+        new_user = self.new_user_ref(domain_id=self.domain_id)
+        user_ref = self.identity_api.create_user(new_user)
+        # Assign the user a role on the project
+        collection_url = (
+            '/projects/%(project_id)s/users/%(user_id)s/roles' % {
+                'project_id': self.project_id,
+                'user_id': user_ref['id']})
+        member_url = ('%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role_id})
+        self.put(member_url, expected_status=204)
+        # Check the user has the role assigned
+        self.head(member_url, expected_status=204)
+        return member_url, user_ref
+
+    def test_delete_user_before_removing_role_assignment_succeeds(self):
+        """Call ``DELETE`` on the user before the role assignment."""
+        member_url, user = self._create_new_user_and_assign_role_on_project()
+        # Delete the user from identity backend
+        self.identity_api.driver.delete_user(user['id'])
+        # Clean up the role assignment
+        self.delete(member_url, expected_status=204)
+        # Make sure the role is gone
+        self.head(member_url, expected_status=404)
+
+    def test_delete_user_and_check_role_assignment_fails(self):
+        """Call ``DELETE`` on the user and check the role assignment."""
+        member_url, user = self._create_new_user_and_assign_role_on_project()
+        # Delete the user from identity backend
+        self.identity_api.delete_user(user['id'])
+        # Without explicitly deleting the role assignment, checking it now
+        # returns a 404 because the user no longer exists in the identity
+        # backend.
+        self.head(member_url, expected_status=404)
+
+    def test_token_revoked_once_group_role_grant_revoked(self):
+        """Test token is revoked when group role grant is revoked
+
+        When a role granted to a group is revoked for a given scope,
+        all tokens related to this scope and belonging to one of the members
+        of this group should be revoked.
+
+        The revocation should be independently to the presence
+        of the revoke API.
+        """
+        # creates grant from group on project.
+        self.assignment_api.create_grant(role_id=self.role['id'],
+                                         project_id=self.project['id'],
+                                         group_id=self.group['id'])
+
+        # adds user to the group.
+        self.identity_api.add_user_to_group(user_id=self.user['id'],
+                                            group_id=self.group['id'])
+
+        # creates a token for the user
+        auth_body = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        token_resp = self.post('/auth/tokens', body=auth_body)
+        token = token_resp.headers.get('x-subject-token')
+
+        # validates the returned token; it should be valid.
+        self.head('/auth/tokens',
+                  headers={'x-subject-token': token},
+                  expected_status=200)
+
+        # revokes the grant from group on project.
+        self.assignment_api.delete_grant(role_id=self.role['id'],
+                                         project_id=self.project['id'],
+                                         group_id=self.group['id'])
+
+        # validates the same token again; it should no longer be valid.
+        self.head('/auth/tokens',
+                  headers={'x-subject-token': token},
+                  expected_status=404)
+
+    # Role Assignments tests
+
+    def test_get_role_assignments(self):
+        """Call ``GET /role_assignments``.
+
+        The sample data set up already has a user, group and project
+        that are part of self.domain. We use these plus a new user
+        we create as our data set, making sure we ignore any
+        role assignments that are already in existence.
+
+        Since we don't yet support a first class entity for role
+        assignments, we are only testing the LIST API.  To create
+        and delete the role assignments we use the old grant APIs.
+
+        Test Plan:
+
+        - Create extra user for tests
+        - Get a list of all existing role assignments
+        - Add a new assignment for each of the four combinations, i.e.
+          group+domain, user+domain, group+project, user+project, using
+          the same role each time
+        - Get a new list of all role assignments, checking these four new
+          ones have been added
+        - Then delete the four we added
+        - Get a new list of all role assignments, checking the four have
+          been removed
+
+        """
+
+        # Since the default fixtures already assign some roles to the
+        # user it creates, we also need a new user that will not have any
+        # existing assignments
+        self.user1 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        self.user1 = self.identity_api.create_user(self.user1)
+
+        collection_url = '/role_assignments'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   resource_url=collection_url)
+        existing_assignments = len(r.result.get('role_assignments'))
+
+        # Now add one of each of the four types of assignment, making sure
+        # that we get them all back.
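+        # Each entity's 'links.assignment' URL is the corresponding grant API
+        # endpoint, e.g. for a group on a domain:
+        #   /domains/{domain_id}/groups/{group_id}/roles/{role_id}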
+        gd_entity = _build_role_assignment_entity(domain_id=self.domain_id,
+                                                  group_id=self.group_id,
+                                                  role_id=self.role_id)
+        self.put(gd_entity['links']['assignment'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 1,
+            resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, gd_entity)
+
+        ud_entity = _build_role_assignment_entity(domain_id=self.domain_id,
+                                                  user_id=self.user1['id'],
+                                                  role_id=self.role_id)
+        self.put(ud_entity['links']['assignment'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 2,
+            resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, ud_entity)
+
+        gp_entity = _build_role_assignment_entity(project_id=self.project_id,
+                                                  group_id=self.group_id,
+                                                  role_id=self.role_id)
+        self.put(gp_entity['links']['assignment'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 3,
+            resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, gp_entity)
+
+        up_entity = _build_role_assignment_entity(project_id=self.project_id,
+                                                  user_id=self.user1['id'],
+                                                  role_id=self.role_id)
+        self.put(up_entity['links']['assignment'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 4,
+            resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+
+        # Now delete the four we added and make sure they are removed
+        # from the collection.
+
+        self.delete(gd_entity['links']['assignment'])
+        self.delete(ud_entity['links']['assignment'])
+        self.delete(gp_entity['links']['assignment'])
+        self.delete(up_entity['links']['assignment'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments,
+            resource_url=collection_url)
+        self.assertRoleAssignmentNotInListResponse(r, gd_entity)
+        self.assertRoleAssignmentNotInListResponse(r, ud_entity)
+        self.assertRoleAssignmentNotInListResponse(r, gp_entity)
+        self.assertRoleAssignmentNotInListResponse(r, up_entity)
+
+    def test_get_effective_role_assignments(self):
+        """Call ``GET /role_assignments?effective``.
+
+        Test Plan:
+
+        - Create two extra users for tests
+        - Add these users to a group
+        - Add a role assignment for the group on a domain
+        - Get a list of all role assignments, checking one has been added
+        - Then get a list of all effective role assignments - the group
+          assignment should have turned into assignments on the domain
+          for each of the group members.
+
+        """
+        self.user1 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        password = self.user1['password']
+        self.user1 = self.identity_api.create_user(self.user1)
+        self.user1['password'] = password
+        self.user2 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        password = self.user2['password']
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+        self.identity_api.add_user_to_group(self.user1['id'], self.group['id'])
+        self.identity_api.add_user_to_group(self.user2['id'], self.group['id'])
+
+        collection_url = '/role_assignments'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   resource_url=collection_url)
+        existing_assignments = len(r.result.get('role_assignments'))
+
+        gd_entity = _build_role_assignment_entity(domain_id=self.domain_id,
+                                                  group_id=self.group_id,
+                                                  role_id=self.role_id)
+        self.put(gd_entity['links']['assignment'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 1,
+            resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, gd_entity)
+
+        # Now re-read the collection asking for effective roles - this
+        # should mean the group assignment is translated into the two
+        # member user assignments
+        collection_url = '/role_assignments?effective'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 2,
+            resource_url=collection_url)
+        ud_entity = _build_role_assignment_entity(
+            link=gd_entity['links']['assignment'], domain_id=self.domain_id,
+            user_id=self.user1['id'], role_id=self.role_id)
+        self.assertRoleAssignmentInListResponse(r, ud_entity)
+        ud_entity = _build_role_assignment_entity(
+            link=gd_entity['links']['assignment'], domain_id=self.domain_id,
+            user_id=self.user2['id'], role_id=self.role_id)
+        self.assertRoleAssignmentInListResponse(r, ud_entity)
+
+    def test_check_effective_values_for_role_assignments(self):
+        """Call ``GET /role_assignments?effective=value``.
+
+        Check the various ways of specifying the 'effective'
+        query parameter.  If the 'effective' query parameter
+        is included then this should always be treated as meaning 'True'
+        unless it is specified as:
+
+        {url}?effective=0
+
+        This is by design to match the agreed way of handling
+        policy checking on query/filter parameters.
+
+        Test Plan:
+
+        - Create two extra users for tests
+        - Add these users to a group
+        - Add a role assignment for the group on a domain
+        - Get a list of all role assignments, checking one has been added
+        - Then issue various requests with different ways of defining
+          the 'effective' query parameter. As we have tested the
+          correctness of the data coming back when we get effective roles
+          in other tests, here we just use the count of entities to
+          know if we are getting effective roles or not
+
+        """
+        self.user1 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        password = self.user1['password']
+        self.user1 = self.identity_api.create_user(self.user1)
+        self.user1['password'] = password
+        self.user2 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        password = self.user2['password']
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+        self.identity_api.add_user_to_group(self.user1['id'], self.group['id'])
+        self.identity_api.add_user_to_group(self.user2['id'], self.group['id'])
+
+        collection_url = '/role_assignments'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   resource_url=collection_url)
+        existing_assignments = len(r.result.get('role_assignments'))
+
+        gd_entity = _build_role_assignment_entity(domain_id=self.domain_id,
+                                                  group_id=self.group_id,
+                                                  role_id=self.role_id)
+        self.put(gd_entity['links']['assignment'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 1,
+            resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, gd_entity)
+
+        # Now re-read the collection asking for effective roles,
+        # using the most common way of defining "effective'. This
+        # should mean the group assignment is translated into the two
+        # member user assignments
+        collection_url = '/role_assignments?effective'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 2,
+            resource_url=collection_url)
+        # Now set 'effective' to false explicitly - should get
+        # back the regular roles
+        collection_url = '/role_assignments?effective=0'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 1,
+            resource_url=collection_url)
+        # Now try setting 'effective' to 'False' explicitly - this is
+        # NOT supported as a way of setting a query or filter
+        # parameter to false by design. Hence we should get back
+        # effective roles.
+        collection_url = '/role_assignments?effective=False'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 2,
+            resource_url=collection_url)
+        # Now set 'effective' to True explicitly
+        collection_url = '/role_assignments?effective=True'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(
+            r,
+            expected_length=existing_assignments + 2,
+            resource_url=collection_url)
+
+    def test_filtered_role_assignments(self):
+        """Call ``GET /role_assignments?filters``.
+
+        Test Plan:
+
+        - Create extra users, group, role and project for tests
+        - Make the following assignments:
+          Give group1, role1 on project1 and domain
+          Give user1, role2 on project1 and domain
+          Make user1 a member of group1
+        - Test a series of single filter list calls, checking that
+          the correct results are obtained
+        - Test a multi-filtered list call
+        - Test listing all effective roles for a given user
+        - Test the equivalent of the list of roles in a project scoped
+          token (all effective roles for a user on a project)
+
+        """
+
+        # Since the default fixtures already assign some roles to the
+        # user it creates, we also need a new user that will not have any
+        # existing assignments
+        self.user1 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        password = self.user1['password']
+        self.user1 = self.identity_api.create_user(self.user1)
+        self.user1['password'] = password
+        self.user2 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        password = self.user2['password']
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+        self.group1 = self.new_group_ref(
+            domain_id=self.domain['id'])
+        self.group1 = self.identity_api.create_group(self.group1)
+        self.identity_api.add_user_to_group(self.user1['id'],
+                                            self.group1['id'])
+        self.identity_api.add_user_to_group(self.user2['id'],
+                                            self.group1['id'])
+        self.project1 = self.new_project_ref(
+            domain_id=self.domain['id'])
+        self.resource_api.create_project(self.project1['id'], self.project1)
+        self.role1 = self.new_role_ref()
+        self.role_api.create_role(self.role1['id'], self.role1)
+        self.role2 = self.new_role_ref()
+        self.role_api.create_role(self.role2['id'], self.role2)
+
+        # Now add one of each of the four types of assignment
+
+        gd_entity = _build_role_assignment_entity(domain_id=self.domain_id,
+                                                  group_id=self.group1['id'],
+                                                  role_id=self.role1['id'])
+        self.put(gd_entity['links']['assignment'])
+
+        ud_entity = _build_role_assignment_entity(domain_id=self.domain_id,
+                                                  user_id=self.user1['id'],
+                                                  role_id=self.role2['id'])
+        self.put(ud_entity['links']['assignment'])
+
+        gp_entity = _build_role_assignment_entity(
+            project_id=self.project1['id'], group_id=self.group1['id'],
+            role_id=self.role1['id'])
+        self.put(gp_entity['links']['assignment'])
+
+        up_entity = _build_role_assignment_entity(
+            project_id=self.project1['id'], user_id=self.user1['id'],
+            role_id=self.role2['id'])
+        self.put(up_entity['links']['assignment'])
+
+        # Now list by various filters to make sure we get back the right ones
+
+        collection_url = ('/role_assignments?scope.project.id=%s' %
+                          self.project1['id'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=2,
+                                                   resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+        self.assertRoleAssignmentInListResponse(r, gp_entity)
+
+        collection_url = ('/role_assignments?scope.domain.id=%s' %
+                          self.domain['id'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=2,
+                                                   resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, ud_entity)
+        self.assertRoleAssignmentInListResponse(r, gd_entity)
+
+        collection_url = '/role_assignments?user.id=%s' % self.user1['id']
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=2,
+                                                   resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+        self.assertRoleAssignmentInListResponse(r, ud_entity)
+
+        collection_url = '/role_assignments?group.id=%s' % self.group1['id']
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=2,
+                                                   resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, gd_entity)
+        self.assertRoleAssignmentInListResponse(r, gp_entity)
+
+        collection_url = '/role_assignments?role.id=%s' % self.role1['id']
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=2,
+                                                   resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, gd_entity)
+        self.assertRoleAssignmentInListResponse(r, gp_entity)
+
+        # Let's try combining two filters together...
+
+        collection_url = (
+            '/role_assignments?user.id=%(user_id)s'
+            '&scope.project.id=%(project_id)s' % {
+                'user_id': self.user1['id'],
+                'project_id': self.project1['id']})
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=1,
+                                                   resource_url=collection_url)
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+
+        # Now for a harder one - filter for a user with effective
+        # roles - this should return role assignments that were directly
+        # assigned as well as those gained by virtue of group membership
+
+        collection_url = ('/role_assignments?effective&user.id=%s' %
+                          self.user1['id'])
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=4,
+                                                   resource_url=collection_url)
+        # Should have the two direct roles...
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+        self.assertRoleAssignmentInListResponse(r, ud_entity)
+        # ...and the two via group membership...
+        gp1_link = _build_role_assignment_link(project_id=self.project1['id'],
+                                               group_id=self.group1['id'],
+                                               role_id=self.role1['id'])
+        gd1_link = _build_role_assignment_link(domain_id=self.domain_id,
+                                               group_id=self.group1['id'],
+                                               role_id=self.role1['id'])
+
+        up1_entity = _build_role_assignment_entity(
+            link=gp1_link, project_id=self.project1['id'],
+            user_id=self.user1['id'], role_id=self.role1['id'])
+        ud1_entity = _build_role_assignment_entity(
+            link=gd1_link, domain_id=self.domain_id, user_id=self.user1['id'],
+            role_id=self.role1['id'])
+        self.assertRoleAssignmentInListResponse(r, up1_entity)
+        self.assertRoleAssignmentInListResponse(r, ud1_entity)
+
+        # ...and for the grand-daddy of them all, simulate the request
+        # that would generate the list of effective roles in a project
+        # scoped token.
+
+        collection_url = (
+            '/role_assignments?effective&user.id=%(user_id)s'
+            '&scope.project.id=%(project_id)s' % {
+                'user_id': self.user1['id'],
+                'project_id': self.project1['id']})
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=2,
+                                                   resource_url=collection_url)
+        # Should have one direct role and one from group membership...
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+        self.assertRoleAssignmentInListResponse(r, up1_entity)
+
+
+class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase):
+    """Base class for testing /v3/role_assignments API behavior."""
+
+    MAX_HIERARCHY_BREADTH = 3
+    MAX_HIERARCHY_DEPTH = CONF.max_project_tree_depth - 1
+
+    def load_sample_data(self):
+        """Creates sample data to be used on tests.
+
+        Created data are i) a role and ii) a domain containing: a project
+        hierarchy and 3 users within 3 groups.
+
+        """
+        def create_project_hierarchy(parent_id, depth):
+            "Creates a random project hierarchy."
+            if depth == 0:
+                return
+
+            breadth = random.randint(1, self.MAX_HIERARCHY_BREADTH)
+
+            subprojects = []
+            for i in range(breadth):
+                subprojects.append(self.new_project_ref(
+                    domain_id=self.domain_id, parent_id=parent_id))
+                self.assignment_api.create_project(subprojects[-1]['id'],
+                                                   subprojects[-1])
+
+            new_parent = subprojects[random.randint(0, breadth - 1)]
+            create_project_hierarchy(new_parent['id'], depth - 1)
+
+        super(RoleAssignmentBaseTestCase, self).load_sample_data()
+
+        # Create a domain
+        self.domain = self.new_domain_ref()
+        self.domain_id = self.domain['id']
+        self.assignment_api.create_domain(self.domain_id, self.domain)
+
+        # Create the root project of the hierarchy
+        self.project = self.new_project_ref(domain_id=self.domain_id)
+        self.project_id = self.project['id']
+        self.assignment_api.create_project(self.project_id, self.project)
+
+        # Create a random project hierarchy
+        create_project_hierarchy(self.project_id,
+                                 random.randint(1, self.MAX_HIERARCHY_DEPTH))
+
+        # Create 3 users
+        self.user_ids = []
+        for i in range(3):
+            user = self.new_user_ref(domain_id=self.domain_id)
+            user = self.identity_api.create_user(user)
+            self.user_ids.append(user['id'])
+
+        # Create 3 groups
+        self.group_ids = []
+        for i in range(3):
+            group = self.new_group_ref(domain_id=self.domain_id)
+            group = self.identity_api.create_group(group)
+            self.group_ids.append(group['id'])
+
+            # Put up to 2 members on each group
+            self.identity_api.add_user_to_group(user_id=self.user_ids[i],
+                                                group_id=group['id'])
+            self.identity_api.add_user_to_group(user_id=self.user_ids[i % 2],
+                                                group_id=group['id'])
+
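+        # Note: this grant uses self.role_id as set up by the base class
+        # sample data; self.role_id is re-assigned to a new role just below.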
+        self.assignment_api.create_grant(user_id=self.user_id,
+                                         project_id=self.project_id,
+                                         role_id=self.role_id)
+
+        # Create a role
+        self.role = self.new_role_ref()
+        self.role_id = self.role['id']
+        self.assignment_api.create_role(self.role_id, self.role)
+
+        # Set default user and group to be used on tests
+        self.default_user_id = self.user_ids[0]
+        self.default_group_id = self.group_ids[0]
+
+    def get_role_assignments(self, expected_status=200, **filters):
+        """Returns the result from querying role assignment API + queried URL.
+
+        Calls GET /v3/role_assignments?<params> and returns its result, where
+        <params> is the HTTP query parameters form of effective option plus
+        filters, if provided. Queried URL is returned as well.
+
+        :returns: a tuple containing the list role assignments API response and
+                  queried URL.
+
+        """
+
+        query_url = self._get_role_assignments_query_url(**filters)
+        response = self.get(query_url, expected_status=expected_status)
+
+        return (response, query_url)
+
+    def _get_role_assignments_query_url(self, **filters):
+        """Returns non-effective role assignments query URL from given filters.
+
+        :param filters: query parameters are created with the provided filters
+                        on role assignments attributes. Valid filters are:
+                        role_id, domain_id, project_id, group_id, user_id and
+                        inherited_to_projects.
+
+        :returns: role assignments query URL.
+
+        """
+        return _build_role_assignment_query_url(**filters)
+
+
+class RoleAssignmentFailureTestCase(RoleAssignmentBaseTestCase):
+    """Class for testing invalid query params on /v3/role_assignments API.
+
+    Querying by both domain and project, or by both user and group, results
+    in an HTTP 400, since a role assignment must contain only a single pair
+    of (actor, target). In addition, since filtering on role assignments
+    applies only to the final result, effective mode cannot be combined with
+    i) group or ii) domain and inherited, because it would always result in
+    an empty list.
+
+    """
+
+    def test_get_role_assignments_by_domain_and_project(self):
+        self.get_role_assignments(domain_id=self.domain_id,
+                                  project_id=self.project_id,
+                                  expected_status=400)
+
+    def test_get_role_assignments_by_user_and_group(self):
+        self.get_role_assignments(user_id=self.default_user_id,
+                                  group_id=self.default_group_id,
+                                  expected_status=400)
+
+    def test_get_role_assignments_by_effective_and_inherited(self):
+        self.config_fixture.config(group='os_inherit', enabled=True)
+
+        self.get_role_assignments(domain_id=self.domain_id, effective=True,
+                                  inherited_to_projects=True,
+                                  expected_status=400)
+
+    def test_get_role_assignments_by_effective_and_group(self):
+        self.get_role_assignments(effective=True,
+                                  group_id=self.default_group_id,
+                                  expected_status=400)
+
+
+class RoleAssignmentDirectTestCase(RoleAssignmentBaseTestCase):
+    """Class for testing direct assignments on /v3/role_assignments API.
+
+    Direct assignments on a domain or project take effect on that target
+    directly, not on its project hierarchy; i.e. they are non-inherited. In
+    addition, direct group assignments are not expanded to the group's users.
+
+    Tests in this class make assertions on the representation and API
+    filtering of direct assignments.
+
+    """
+
+    def _test_get_role_assignments(self, **filters):
+        """Generic filtering test method.
+
+        According to the provided filters, this method:
+        - creates a new role assignment;
+        - asserts that the list role assignments API responds correctly;
+        - deletes the created role assignment.
+
+        :param filters: filters to be considered when listing role assignments.
+                        Valid filters are: role_id, domain_id, project_id,
+                        group_id, user_id and inherited_to_projects.
+
+        """
+
+        # Fills default assignment with provided filters
+        test_assignment = self._set_default_assignment_attributes(**filters)
+
+        # Create new role assignment for this test
+        self.assignment_api.create_grant(**test_assignment)
+
+        # Get expected role assignments
+        expected_assignments = self._list_expected_role_assignments(
+            **test_assignment)
+
+        # Get role assignments from API
+        response, query_url = self.get_role_assignments(**test_assignment)
+        self.assertValidRoleAssignmentListResponse(response,
+                                                   resource_url=query_url)
+        self.assertEqual(len(expected_assignments),
+                         len(response.result.get('role_assignments')))
+
+        # Assert that expected role assignments were returned by the API call
+        for assignment in expected_assignments:
+            self.assertRoleAssignmentInListResponse(response, assignment)
+
+        # Delete created role assignment
+        self.assignment_api.delete_grant(**test_assignment)
+
+    def _set_default_assignment_attributes(self, **attribs):
+        """Inserts default values for missing attributes of role assignment.
+
+        If no actor, target or role is provided, it will default to a value
+        from the sample data.
+
+        :param attribs: info from a role assignment entity. Valid attributes
+                        are: role_id, domain_id, project_id, group_id, user_id
+                        and inherited_to_projects.
+
+        """
+        if not any(target in attribs
+                   for target in ('domain_id', 'project_id')):
+            attribs['project_id'] = self.project_id
+
+        if not any(actor in attribs for actor in ('user_id', 'group_id')):
+            attribs['user_id'] = self.default_user_id
+
+        if 'role_id' not in attribs:
+            attribs['role_id'] = self.role_id
+
+        return attribs
+
+    def _list_expected_role_assignments(self, **filters):
+        """Given the filters, it returns expected direct role assignments.
+
+        :param filters: filters that will be considered when listing role
+                        assignments. Valid filters are: role_id, domain_id,
+                        project_id, group_id, user_id and
+                        inherited_to_projects.
+
+        :returns: the list of the expected role assignments.
+
+        """
+        return [_build_role_assignment_entity(**filters)]
+
+    # Test cases below call the generic test method, providing different filter
+    # combinations. Filters are provided as specified in the method name, after
+    # 'by'. For example, test_get_role_assignments_by_project_user_and_role
+    # calls the generic test method with project_id, user_id and role_id.
+
+    def test_get_role_assignments_by_domain(self, **filters):
+        self._test_get_role_assignments(domain_id=self.domain_id, **filters)
+
+    def test_get_role_assignments_by_project(self, **filters):
+        self._test_get_role_assignments(project_id=self.project_id, **filters)
+
+    def test_get_role_assignments_by_user(self, **filters):
+        self._test_get_role_assignments(user_id=self.default_user_id,
+                                        **filters)
+
+    def test_get_role_assignments_by_group(self, **filters):
+        self._test_get_role_assignments(group_id=self.default_group_id,
+                                        **filters)
+
+    def test_get_role_assignments_by_role(self, **filters):
+        self._test_get_role_assignments(role_id=self.role_id, **filters)
+
+    def test_get_role_assignments_by_domain_and_user(self, **filters):
+        self.test_get_role_assignments_by_domain(user_id=self.default_user_id,
+                                                 **filters)
+
+    def test_get_role_assignments_by_domain_and_group(self, **filters):
+        self.test_get_role_assignments_by_domain(
+            group_id=self.default_group_id, **filters)
+
+    def test_get_role_assignments_by_project_and_user(self, **filters):
+        self.test_get_role_assignments_by_project(user_id=self.default_user_id,
+                                                  **filters)
+
+    def test_get_role_assignments_by_project_and_group(self, **filters):
+        self.test_get_role_assignments_by_project(
+            group_id=self.default_group_id, **filters)
+
+    def test_get_role_assignments_by_domain_user_and_role(self, **filters):
+        self.test_get_role_assignments_by_domain_and_user(role_id=self.role_id,
+                                                          **filters)
+
+    def test_get_role_assignments_by_domain_group_and_role(self, **filters):
+        self.test_get_role_assignments_by_domain_and_group(
+            role_id=self.role_id, **filters)
+
+    def test_get_role_assignments_by_project_user_and_role(self, **filters):
+        self.test_get_role_assignments_by_project_and_user(
+            role_id=self.role_id, **filters)
+
+    def test_get_role_assignments_by_project_group_and_role(self, **filters):
+        self.test_get_role_assignments_by_project_and_group(
+            role_id=self.role_id, **filters)
+
+
+class RoleAssignmentInheritedTestCase(RoleAssignmentDirectTestCase):
+    """Class for testing inherited assignments on /v3/role_assignments API.
+
+    Inherited assignments on a domain or project have no effect on the target
+    itself, but on the projects under it instead.
+
+    Tests in this class do not make assertions on the effect of inherited
+    assignments, but on their representation and API filtering.
+
+    """
+
+    def config_overrides(self):
+        super(RoleAssignmentInheritedTestCase, self).config_overrides()
+        self.config_fixture.config(group='os_inherit', enabled=True)
+
+    def _test_get_role_assignments(self, **filters):
+        """Adds inherited_to_project filter to expected entity in tests."""
+        super(RoleAssignmentInheritedTestCase,
+              self)._test_get_role_assignments(inherited_to_projects=True,
+                                               **filters)
+
+
+class RoleAssignmentEffectiveTestCase(RoleAssignmentInheritedTestCase):
+    """Class for testing inheritance effects on /v3/role_assignments API.
+
+    Inherited assignments on a domain or project have no effect on the target
+    itself, but on the projects under it instead.
+
+    Tests in this class make assertions on the effect of inherited
+    assignments and on API filtering.
+
+    """
+
+    def _get_role_assignments_query_url(self, **filters):
+        """Returns effective role assignments query URL from given filters.
+
+        For test methods in this class, effective will always be true. Since
+        in effective mode inherited_to_projects, group_id, domain_id and
+        project_id are ignored, they are dropped from the provided filters.
+
+        :param filters: query parameters are created with the provided filters.
+                        Valid filters are: role_id, domain_id, project_id,
+                        group_id, user_id and inherited_to_projects.
+
+        :returns: role assignments query URL.
+
+        """
+        query_filters = filters.copy()
+        query_filters.pop('inherited_to_projects')
+
+        query_filters.pop('group_id', None)
+        query_filters.pop('domain_id', None)
+        query_filters.pop('project_id', None)
+
+        return _build_role_assignment_query_url(effective=True,
+                                                **query_filters)
+
+    def _list_expected_role_assignments(self, **filters):
+        """Given the filters, it returns expected direct role assignments.
+
+        :param filters: filters that will be considered when listing role
+                        assignments. Valid filters are: role_id, domain_id,
+                        project_id, group_id, user_id and
+                        inherited_to_projects.
+
+        :returns: the list of the expected role assignments.
+
+        """
+        # Get assignment link, to be put on 'links': {'assignment': link}
+        assignment_link = _build_role_assignment_link(**filters)
+
+        # Expand group membership
+        if filters.get('group_id'):
+            user_ids = [user['id'] for user in
+                        self.identity_api.list_users_in_group(
+                            filters['group_id'])]
+        else:
+            user_ids = [self.default_user_id]
+
+        # Expand role inheritance
+        if filters.get('domain_id'):
+            project_ids = [project['id'] for project in
+                           self.assignment_api.list_projects_in_domain(
+                               filters.pop('domain_id'))]
+        else:
+            project_ids = [project['id'] for project in
+                           self.assignment_api.list_projects_in_subtree(
+                               self.project_id)]
+
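+        # For instance, an inherited assignment for a group with users
+        # (u1, u2) on a domain with projects (p1, p2) expands to the four
+        # effective assignments (u1, p1), (u1, p2), (u2, p1) and (u2, p2),
+        # each linking back to the original group-domain assignment.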
+        # Compute expected role assignments
+        assignments = []
+        for project_id in project_ids:
+            filters['project_id'] = project_id
+            for user_id in user_ids:
+                filters['user_id'] = user_id
+                assignments.append(_build_role_assignment_entity(
+                    link=assignment_link, **filters))
+
+        return assignments
+
+
+class AssignmentInheritanceTestCase(test_v3.RestfulTestCase):
+    """Test inheritance crud and its effects."""
+
+    def config_overrides(self):
+        super(AssignmentInheritanceTestCase, self).config_overrides()
+        self.config_fixture.config(group='os_inherit', enabled=True)
+
+    def test_get_token_from_inherited_user_domain_role_grants(self):
+        # Create a new user to ensure that no grant is loaded from sample data
+        user = self.new_user_ref(domain_id=self.domain_id)
+        password = user['password']
+        user = self.identity_api.create_user(user)
+        user['password'] = password
+
+        # Define domain and project authentication data
+        domain_auth_data = self.build_authentication_request(
+            user_id=user['id'],
+            password=user['password'],
+            domain_id=self.domain_id)
+        project_auth_data = self.build_authentication_request(
+            user_id=user['id'],
+            password=user['password'],
+            project_id=self.project_id)
+
+        # Check the user cannot get a domain nor a project token
+        self.v3_authenticate_token(domain_auth_data, expected_status=401)
+        self.v3_authenticate_token(project_auth_data, expected_status=401)
+
+        # Grant non-inherited role for user on domain
+        non_inher_ud_link = _build_role_assignment_link(
+            domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id)
+        self.put(non_inher_ud_link)
+
+        # Check the user can get only a domain token
+        self.v3_authenticate_token(domain_auth_data)
+        self.v3_authenticate_token(project_auth_data, expected_status=401)
+
+        # Create inherited role
+        inherited_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
+        self.role_api.create_role(inherited_role['id'], inherited_role)
+
+        # Grant inherited role for user on domain
+        inher_ud_link = _build_role_assignment_link(
+            domain_id=self.domain_id, user_id=user['id'],
+            role_id=inherited_role['id'], inherited_to_projects=True)
+        self.put(inher_ud_link)
+
+        # Check the user can get both a domain and a project token
+        self.v3_authenticate_token(domain_auth_data)
+        self.v3_authenticate_token(project_auth_data)
+
+        # Delete inherited grant
+        self.delete(inher_ud_link)
+
+        # Check the user can only get a domain token
+        self.v3_authenticate_token(domain_auth_data)
+        self.v3_authenticate_token(project_auth_data, expected_status=401)
+
+        # Delete non-inherited grant
+        self.delete(non_inher_ud_link)
+
+        # Check the user cannot get a domain token anymore
+        self.v3_authenticate_token(domain_auth_data, expected_status=401)
+
+    def test_get_token_from_inherited_group_domain_role_grants(self):
+        # Create a new group and put a new user in it to
+        # ensure that no grant is loaded from sample data
+        user = self.new_user_ref(domain_id=self.domain_id)
+        password = user['password']
+        user = self.identity_api.create_user(user)
+        user['password'] = password
+
+        group = self.new_group_ref(domain_id=self.domain['id'])
+        group = self.identity_api.create_group(group)
+        self.identity_api.add_user_to_group(user['id'], group['id'])
+
+        # Define domain and project authentication data
+        domain_auth_data = self.build_authentication_request(
+            user_id=user['id'],
+            password=user['password'],
+            domain_id=self.domain_id)
+        project_auth_data = self.build_authentication_request(
+            user_id=user['id'],
+            password=user['password'],
+            project_id=self.project_id)
+
+        # Check the user cannot get a domain nor a project token
+        self.v3_authenticate_token(domain_auth_data, expected_status=401)
+        self.v3_authenticate_token(project_auth_data, expected_status=401)
+
+        # Grant non-inherited role for user on domain
+        non_inher_gd_link = _build_role_assignment_link(
+            domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id)
+        self.put(non_inher_gd_link)
+
+        # Check the user can get only a domain token
+        self.v3_authenticate_token(domain_auth_data)
+        self.v3_authenticate_token(project_auth_data, expected_status=401)
+
+        # Create inherited role
+        inherited_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
+        self.role_api.create_role(inherited_role['id'], inherited_role)
+
+        # Grant inherited role for user on domain
+        inher_gd_link = _build_role_assignment_link(
+            domain_id=self.domain_id, user_id=user['id'],
+            role_id=inherited_role['id'], inherited_to_projects=True)
+        self.put(inher_gd_link)
+
+        # Check the user can get both a domain and a project token
+        self.v3_authenticate_token(domain_auth_data)
+        self.v3_authenticate_token(project_auth_data)
+
+        # Delete inherited grant
+        self.delete(inher_gd_link)
+
+        # Check the user can only get a domain token
+        self.v3_authenticate_token(domain_auth_data)
+        self.v3_authenticate_token(project_auth_data, expected_status=401)
+
+        # Delete non-inherited grant
+        self.delete(non_inher_gd_link)
+
+        # Check the user cannot get a domain token anymore
+        self.v3_authenticate_token(domain_auth_data, expected_status=401)
+
+    def test_crud_user_inherited_domain_role_grants(self):
+        role_list = []
+        for _ in range(2):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        # Create a non-inherited role as a spoiler
+        self.assignment_api.create_grant(
+            role_list[1]['id'], user_id=self.user['id'],
+            domain_id=self.domain_id)
+
+        base_collection_url = (
+            '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': self.domain_id,
+                'user_id': self.user['id']})
+        member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
+            'collection_url': base_collection_url,
+            'role_id': role_list[0]['id']}
+        collection_url = base_collection_url + '/inherited_to_projects'
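+        # With illustrative IDs, these resolve to:
+        #   member_url:
+        #     /OS-INHERIT/domains/d1/users/u1/roles/r1/inherited_to_projects
+        #   collection_url:
+        #     /OS-INHERIT/domains/d1/users/u1/roles/inherited_to_projects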
+
+        self.put(member_url)
+
+        # Check we can read it back
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=role_list[0],
+                                         resource_url=collection_url)
+
+        # Now delete and check it's gone
+        self.delete(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, expected_length=0,
+                                         resource_url=collection_url)
+
+    def test_list_role_assignments_for_inherited_domain_grants(self):
+        """Call ``GET /role_assignments with inherited domain grants``.
+
+        Test Plan:
+
+        - Create 4 roles
+        - Create a domain with a user and two projects
+        - Assign two direct roles to project1
+        - Assign a spoiler role to project2
+        - Issue the URL to add inherited role to the domain
+        - Issue the URL to check it is indeed on the domain
+        - Issue the URL to check effective roles on project1 - this
+          should return 3 roles.
+
+        """
+        role_list = []
+        for _ in range(4):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        domain = self.new_domain_ref()
+        self.resource_api.create_domain(domain['id'], domain)
+        user1 = self.new_user_ref(
+            domain_id=domain['id'])
+        password = user1['password']
+        user1 = self.identity_api.create_user(user1)
+        user1['password'] = password
+        project1 = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(project2['id'], project2)
+        # Add some roles to the project
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project1['id'], role_list[0]['id'])
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project1['id'], role_list[1]['id'])
+        # ...and one on a different project as a spoiler
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project2['id'], role_list[2]['id'])
+
+        # Now create our inherited role on the domain
+        base_collection_url = (
+            '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': domain['id'],
+                'user_id': user1['id']})
+        member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
+            'collection_url': base_collection_url,
+            'role_id': role_list[3]['id']}
+        collection_url = base_collection_url + '/inherited_to_projects'
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=role_list[3],
+                                         resource_url=collection_url)
+
+        # Now use the list domain role assignments api to check if this
+        # is included
+        collection_url = (
+            '/role_assignments?user.id=%(user_id)s'
+            '&scope.domain.id=%(domain_id)s' % {
+                'user_id': user1['id'],
+                'domain_id': domain['id']})
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=1,
+                                                   resource_url=collection_url)
+        ud_entity = _build_role_assignment_entity(
+            domain_id=domain['id'], user_id=user1['id'],
+            role_id=role_list[3]['id'], inherited_to_projects=True)
+        self.assertRoleAssignmentInListResponse(r, ud_entity)
+
+        # Now ask for effective list role assignments - the role should
+        # turn into a project role, along with the two direct roles that are
+        # on the project
+        collection_url = (
+            '/role_assignments?effective&user.id=%(user_id)s'
+            '&scope.project.id=%(project_id)s' % {
+                'user_id': user1['id'],
+                'project_id': project1['id']})
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=3,
+                                                   resource_url=collection_url)
+        # An effective role for an inherited role will be a project
+        # entity, with a domain link to the inherited assignment
+        ud_url = _build_role_assignment_link(
+            domain_id=domain['id'], user_id=user1['id'],
+            role_id=role_list[3]['id'], inherited_to_projects=True)
+        up_entity = _build_role_assignment_entity(link=ud_url,
+                                                  project_id=project1['id'],
+                                                  user_id=user1['id'],
+                                                  role_id=role_list[3]['id'],
+                                                  inherited_to_projects=True)
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+
+    def test_list_role_assignments_for_disabled_inheritance_extension(self):
+        """Call ``GET /role_assignments with inherited domain grants``.
+
+        Test Plan:
+
+        - Issue the URL to add inherited role to the domain
+        - Issue the URL to check effective roles on project include the
+          inherited role
+        - Disable the extension
+        - Re-check the effective roles, proving the inherited role no longer
+          shows up.
+
+        """
+
+        role_list = []
+        for _ in range(4):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        domain = self.new_domain_ref()
+        self.resource_api.create_domain(domain['id'], domain)
+        user1 = self.new_user_ref(
+            domain_id=domain['id'])
+        password = user1['password']
+        user1 = self.identity_api.create_user(user1)
+        user1['password'] = password
+        project1 = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(project2['id'], project2)
+        # Add some roles to the project
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project1['id'], role_list[0]['id'])
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project1['id'], role_list[1]['id'])
+        # ...and one on a different project as a spoiler
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project2['id'], role_list[2]['id'])
+
+        # Now create our inherited role on the domain
+        base_collection_url = (
+            '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': domain['id'],
+                'user_id': user1['id']})
+        member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
+            'collection_url': base_collection_url,
+            'role_id': role_list[3]['id']}
+        collection_url = base_collection_url + '/inherited_to_projects'
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=role_list[3],
+                                         resource_url=collection_url)
+
+        # Get effective list role assignments - the role should
+        # turn into a project role, along with the two direct roles that are
+        # on the project
+        collection_url = (
+            '/role_assignments?effective&user.id=%(user_id)s'
+            '&scope.project.id=%(project_id)s' % {
+                'user_id': user1['id'],
+                'project_id': project1['id']})
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=3,
+                                                   resource_url=collection_url)
+
+        ud_url = _build_role_assignment_link(
+            domain_id=domain['id'], user_id=user1['id'],
+            role_id=role_list[3]['id'], inherited_to_projects=True)
+        up_entity = _build_role_assignment_entity(link=ud_url,
+                                                  project_id=project1['id'],
+                                                  user_id=user1['id'],
+                                                  role_id=role_list[3]['id'],
+                                                  inherited_to_projects=True)
+
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+
+        # Disable the extension and re-check the list; the role inherited
+        # from the domain should no longer show up
+        self.config_fixture.config(group='os_inherit', enabled=False)
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=2,
+                                                   resource_url=collection_url)
+
+        self.assertRoleAssignmentNotInListResponse(r, up_entity)
+
+    def test_list_role_assignments_for_inherited_group_domain_grants(self):
+        """Call ``GET /role_assignments with inherited group domain grants``.
+
+        Test Plan:
+
+        - Create 4 roles
+        - Create a domain with a user and two projects
+        - Assign two direct roles to project1
+        - Assign a spoiler role to project2
+        - Issue the URL to add inherited role to the domain
+        - Issue the URL to check it is indeed on the domain
+        - Issue the URL to check effective roles on project1 - this
+          should return 3 roles.
+
+        """
+        role_list = []
+        for _ in range(4):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        domain = self.new_domain_ref()
+        self.resource_api.create_domain(domain['id'], domain)
+        user1 = self.new_user_ref(
+            domain_id=domain['id'])
+        password = user1['password']
+        user1 = self.identity_api.create_user(user1)
+        user1['password'] = password
+        user2 = self.new_user_ref(
+            domain_id=domain['id'])
+        password = user2['password']
+        user2 = self.identity_api.create_user(user2)
+        user2['password'] = password
+        group1 = self.new_group_ref(
+            domain_id=domain['id'])
+        group1 = self.identity_api.create_group(group1)
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group1['id'])
+        self.identity_api.add_user_to_group(user2['id'],
+                                            group1['id'])
+        project1 = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(project2['id'], project2)
+        # Add some roles to the project
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project1['id'], role_list[0]['id'])
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project1['id'], role_list[1]['id'])
+        # ...and one on a different project as a spoiler
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project2['id'], role_list[2]['id'])
+
+        # Now create our inherited role on the domain
+        base_collection_url = (
+            '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
+                'domain_id': domain['id'],
+                'group_id': group1['id']})
+        member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
+            'collection_url': base_collection_url,
+            'role_id': role_list[3]['id']}
+        collection_url = base_collection_url + '/inherited_to_projects'
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=role_list[3],
+                                         resource_url=collection_url)
+
+        # Now use the list domain role assignments api to check if this
+        # is included
+        collection_url = (
+            '/role_assignments?group.id=%(group_id)s'
+            '&scope.domain.id=%(domain_id)s' % {
+                'group_id': group1['id'],
+                'domain_id': domain['id']})
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=1,
+                                                   resource_url=collection_url)
+        gd_entity = _build_role_assignment_entity(
+            domain_id=domain['id'], group_id=group1['id'],
+            role_id=role_list[3]['id'], inherited_to_projects=True)
+        self.assertRoleAssignmentInListResponse(r, gd_entity)
+
+        # Now ask for effective list role assignments - the role should
+        # turn into a user project role, along with the two direct roles
+        # that are on the project
+        collection_url = (
+            '/role_assignments?effective&user.id=%(user_id)s'
+            '&scope.project.id=%(project_id)s' % {
+                'user_id': user1['id'],
+                'project_id': project1['id']})
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=3,
+                                                   resource_url=collection_url)
+        # An effective role for an inherited role will be a project
+        # entity, with a domain link to the inherited assignment
+        up_entity = _build_role_assignment_entity(
+            link=gd_entity['links']['assignment'], project_id=project1['id'],
+            user_id=user1['id'], role_id=role_list[3]['id'],
+            inherited_to_projects=True)
+        self.assertRoleAssignmentInListResponse(r, up_entity)
+
+    def test_filtered_role_assignments_for_inherited_grants(self):
+        """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``.
+
+        Test Plan:
+
+        - Create 5 roles
+        - Create a domain with a user, group and two projects
+        - Assign three direct spoiler roles to projects
+        - Issue the URL to add an inherited user role to the domain
+        - Issue the URL to add an inherited group role to the domain
+        - Issue the URL to filter by inherited roles - this should
+          return just the 2 inherited roles.
+
+        """
+        role_list = []
+        for _ in range(5):
+            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
+        domain = self.new_domain_ref()
+        self.resource_api.create_domain(domain['id'], domain)
+        user1 = self.new_user_ref(
+            domain_id=domain['id'])
+        password = user1['password']
+        user1 = self.identity_api.create_user(user1)
+        user1['password'] = password
+        group1 = self.new_group_ref(
+            domain_id=domain['id'])
+        group1 = self.identity_api.create_group(group1)
+        project1 = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(project1['id'], project1)
+        project2 = self.new_project_ref(
+            domain_id=domain['id'])
+        self.resource_api.create_project(project2['id'], project2)
+        # Add some spoiler roles to the projects
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project1['id'], role_list[0]['id'])
+        self.assignment_api.add_role_to_user_and_project(
+            user1['id'], project2['id'], role_list[1]['id'])
+        # Create a non-inherited role as a spoiler
+        self.assignment_api.create_grant(
+            role_list[2]['id'], user_id=user1['id'], domain_id=domain['id'])
+
+        # Now create two inherited roles on the domain, one for a user
+        # and one for a group
+        base_collection_url = (
+            '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': domain['id'],
+                'user_id': user1['id']})
+        member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
+            'collection_url': base_collection_url,
+            'role_id': role_list[3]['id']}
+        collection_url = base_collection_url + '/inherited_to_projects'
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=role_list[3],
+                                         resource_url=collection_url)
+
+        base_collection_url = (
+            '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
+                'domain_id': domain['id'],
+                'group_id': group1['id']})
+        member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
+            'collection_url': base_collection_url,
+            'role_id': role_list[4]['id']}
+        collection_url = base_collection_url + '/inherited_to_projects'
+
+        self.put(member_url)
+        self.head(member_url)
+        r = self.get(collection_url)
+        self.assertValidRoleListResponse(r, ref=role_list[4],
+                                         resource_url=collection_url)
+
+        # Now use the list role assignments api to get a list of inherited
+        # roles on the domain - should get back the two roles
+        collection_url = (
+            '/role_assignments?scope.OS-INHERIT:inherited_to=projects')
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   expected_length=2,
+                                                   resource_url=collection_url)
+        ud_entity = _build_role_assignment_entity(
+            domain_id=domain['id'], user_id=user1['id'],
+            role_id=role_list[3]['id'], inherited_to_projects=True)
+        gd_entity = _build_role_assignment_entity(
+            domain_id=domain['id'], group_id=group1['id'],
+            role_id=role_list[4]['id'], inherited_to_projects=True)
+        self.assertRoleAssignmentInListResponse(r, ud_entity)
+        self.assertRoleAssignmentInListResponse(r, gd_entity)
+
+    def _setup_hierarchical_projects_scenario(self):
+        """Creates basic hierarchical projects scenario.
+
+        This basic scenario contains a root with one leaf project and
+        two roles with the following names: non-inherited and inherited.
+
+        """
+        # Create project hierarchy
+        root = self.new_project_ref(domain_id=self.domain['id'])
+        leaf = self.new_project_ref(domain_id=self.domain['id'],
+                                    parent_id=root['id'])
+
+        self.resource_api.create_project(root['id'], root)
+        self.resource_api.create_project(leaf['id'], leaf)
+
+        # Create 'non-inherited' and 'inherited' roles
+        non_inherited_role = {'id': uuid.uuid4().hex, 'name': 'non-inherited'}
+        self.role_api.create_role(non_inherited_role['id'], non_inherited_role)
+        inherited_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
+        self.role_api.create_role(inherited_role['id'], inherited_role)
+
+        return (root['id'], leaf['id'],
+                non_inherited_role['id'], inherited_role['id'])
+
+    def test_get_token_from_inherited_user_project_role_grants(self):
+        # Create default scenario
+        root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
+            self._setup_hierarchical_projects_scenario())
+
+        # Define root and leaf projects authentication data
+        root_project_auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=root_id)
+        leaf_project_auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=leaf_id)
+
+        # Check the user cannot get a token on root nor leaf project
+        self.v3_authenticate_token(root_project_auth_data, expected_status=401)
+        self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
+
+        # Grant non-inherited role for user on leaf project
+        non_inher_up_link = _build_role_assignment_link(
+            project_id=leaf_id, user_id=self.user['id'],
+            role_id=non_inherited_role_id)
+        self.put(non_inher_up_link)
+
+        # Check the user can only get a token on leaf project
+        self.v3_authenticate_token(root_project_auth_data, expected_status=401)
+        self.v3_authenticate_token(leaf_project_auth_data)
+
+        # Grant inherited role for user on root project
+        inher_up_link = _build_role_assignment_link(
+            project_id=root_id, user_id=self.user['id'],
+            role_id=inherited_role_id, inherited_to_projects=True)
+        self.put(inher_up_link)
+
+        # Check the user still can get a token only on leaf project
+        self.v3_authenticate_token(root_project_auth_data, expected_status=401)
+        self.v3_authenticate_token(leaf_project_auth_data)
+
+        # Delete non-inherited grant
+        self.delete(non_inher_up_link)
+
+        # Check the inherited role still applies for leaf project
+        self.v3_authenticate_token(root_project_auth_data, expected_status=401)
+        self.v3_authenticate_token(leaf_project_auth_data)
+
+        # Delete inherited grant
+        self.delete(inher_up_link)
+
+        # Check the user cannot get a token on leaf project anymore
+        self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
+
+    def test_get_token_from_inherited_group_project_role_grants(self):
+        # Create default scenario
+        root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
+            self._setup_hierarchical_projects_scenario())
+
+        # Create group and add user to it
+        group = self.new_group_ref(domain_id=self.domain['id'])
+        group = self.identity_api.create_group(group)
+        self.identity_api.add_user_to_group(self.user['id'], group['id'])
+
+        # Define root and leaf projects authentication data
+        root_project_auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=root_id)
+        leaf_project_auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=leaf_id)
+
+        # Check the user cannot get a token on root nor leaf project
+        self.v3_authenticate_token(root_project_auth_data, expected_status=401)
+        self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
+
+        # Grant non-inherited role for group on leaf project
+        non_inher_gp_link = _build_role_assignment_link(
+            project_id=leaf_id, group_id=group['id'],
+            role_id=non_inherited_role_id)
+        self.put(non_inher_gp_link)
+
+        # Check the user can only get a token on leaf project
+        self.v3_authenticate_token(root_project_auth_data, expected_status=401)
+        self.v3_authenticate_token(leaf_project_auth_data)
+
+        # Grant inherited role for group on root project
+        inher_gp_link = _build_role_assignment_link(
+            project_id=root_id, group_id=group['id'],
+            role_id=inherited_role_id, inherited_to_projects=True)
+        self.put(inher_gp_link)
+
+        # Check the user still can get a token only on leaf project
+        self.v3_authenticate_token(root_project_auth_data, expected_status=401)
+        self.v3_authenticate_token(leaf_project_auth_data)
+
+        # Delete non-inherited grant
+        self.delete(non_inher_gp_link)
+
+        # Check the inherited role still applies for leaf project
+        self.v3_authenticate_token(leaf_project_auth_data)
+
+        # Delete inherited grant
+        self.delete(inher_gp_link)
+
+        # Check the user cannot get a token on leaf project anymore
+        self.v3_authenticate_token(leaf_project_auth_data, expected_status=401)
+
+    def test_get_role_assignments_for_project_hierarchy(self):
+        """Call ``GET /role_assignments``.
+
+        Test Plan:
+
+        - Create 2 roles
+        - Create a hierarchy of projects with one root and one leaf project
+        - Issue the URL to add a non-inherited user role to the root project
+        - Issue the URL to add an inherited user role to the root project
+        - Issue the URL to get all role assignments - this should return just
+          2 roles (non-inherited and inherited) in the root project.
+
+        """
+        # Create default scenario
+        root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
+            self._setup_hierarchical_projects_scenario())
+
+        # Grant non-inherited role
+        non_inher_up_entity = _build_role_assignment_entity(
+            project_id=root_id, user_id=self.user['id'],
+            role_id=non_inherited_role_id)
+        self.put(non_inher_up_entity['links']['assignment'])
+
+        # Grant inherited role
+        inher_up_entity = _build_role_assignment_entity(
+            project_id=root_id, user_id=self.user['id'],
+            role_id=inherited_role_id, inherited_to_projects=True)
+        self.put(inher_up_entity['links']['assignment'])
+
+        # Get role assignments
+        collection_url = '/role_assignments'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   resource_url=collection_url)
+
+        # Assert that the user has non-inherited role on root project
+        self.assertRoleAssignmentInListResponse(r, non_inher_up_entity)
+
+        # Assert that the user has inherited role on root project
+        self.assertRoleAssignmentInListResponse(r, inher_up_entity)
+
+        # Assert that the user does not have non-inherited role on leaf project
+        non_inher_up_entity = _build_role_assignment_entity(
+            project_id=leaf_id, user_id=self.user['id'],
+            role_id=non_inherited_role_id)
+        self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity)
+
+        # Assert that the user does not have inherited role on leaf project
+        inher_up_entity['scope']['project']['id'] = leaf_id
+        self.assertRoleAssignmentNotInListResponse(r, inher_up_entity)
+
+    def test_get_effective_role_assignments_for_project_hierarchy(self):
+        """Call ``GET /role_assignments?effective``.
+
+        Test Plan:
+
+        - Create 2 roles
+        - Create a hierarchy of projects with one root and one leaf project
+        - Issue the URL to add a non-inherited user role to the root project
+        - Issue the URL to add an inherited user role to the root project
+        - Issue the URL to get effective role assignments - this should return
+          1 role (non-inherited) on the root project and 1 role (inherited) on
+          the leaf project.
+
+        """
+        # Create default scenario
+        root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
+            self._setup_hierarchical_projects_scenario())
+
+        # Grant non-inherited role
+        non_inher_up_entity = _build_role_assignment_entity(
+            project_id=root_id, user_id=self.user['id'],
+            role_id=non_inherited_role_id)
+        self.put(non_inher_up_entity['links']['assignment'])
+
+        # Grant inherited role
+        inher_up_entity = _build_role_assignment_entity(
+            project_id=root_id, user_id=self.user['id'],
+            role_id=inherited_role_id, inherited_to_projects=True)
+        self.put(inher_up_entity['links']['assignment'])
+
+        # Get effective role assignments
+        collection_url = '/role_assignments?effective'
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   resource_url=collection_url)
+
+        # Assert that the user has non-inherited role on root project
+        self.assertRoleAssignmentInListResponse(r, non_inher_up_entity)
+
+        # Assert that the user does not have inherited role on root project
+        self.assertRoleAssignmentNotInListResponse(r, inher_up_entity)
+
+        # Assert that the user does not have non-inherited role on leaf project
+        non_inher_up_entity = _build_role_assignment_entity(
+            project_id=leaf_id, user_id=self.user['id'],
+            role_id=non_inherited_role_id)
+        self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity)
+
+        # Assert that the user has inherited role on leaf project
+        inher_up_entity['scope']['project']['id'] = leaf_id
+        self.assertRoleAssignmentInListResponse(r, inher_up_entity)
+
+    def test_get_inherited_role_assignments_for_project_hierarchy(self):
+        """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``.
+
+        Test Plan:
+
+        - Create 2 roles
+        - Create a hierarchy of projects with one root and one leaf project
+        - Issue the URL to add a non-inherited user role to the root project
+        - Issue the URL to add an inherited user role to the root project
+        - Issue the URL to filter inherited to projects role assignments - this
+          should return 1 role (inherited) on the root project.
+
+        """
+        # Create default scenario
+        root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
+            self._setup_hierarchical_projects_scenario())
+
+        # Grant non-inherited role
+        non_inher_up_entity = _build_role_assignment_entity(
+            project_id=root_id, user_id=self.user['id'],
+            role_id=non_inherited_role_id)
+        self.put(non_inher_up_entity['links']['assignment'])
+
+        # Grant inherited role
+        inher_up_entity = _build_role_assignment_entity(
+            project_id=root_id, user_id=self.user['id'],
+            role_id=inherited_role_id, inherited_to_projects=True)
+        self.put(inher_up_entity['links']['assignment'])
+
+        # Get inherited role assignments
+        collection_url = ('/role_assignments'
+                          '?scope.OS-INHERIT:inherited_to=projects')
+        r = self.get(collection_url)
+        self.assertValidRoleAssignmentListResponse(r,
+                                                   resource_url=collection_url)
+
+        # Assert that the user does not have non-inherited role on root project
+        self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity)
+
+        # Assert that the user has inherited role on root project
+        self.assertRoleAssignmentInListResponse(r, inher_up_entity)
+
+        # Assert that the user does not have non-inherited role on leaf project
+        non_inher_up_entity = _build_role_assignment_entity(
+            project_id=leaf_id, user_id=self.user['id'],
+            role_id=non_inherited_role_id)
+        self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity)
+
+        # Assert that the user does not have inherited role on leaf project
+        inher_up_entity['scope']['project']['id'] = leaf_id
+        self.assertRoleAssignmentNotInListResponse(r, inher_up_entity)
+
+
+class AssignmentInheritanceDisabledTestCase(test_v3.RestfulTestCase):
+    """Test inheritance crud and its effects."""
+
+    def config_overrides(self):
+        super(AssignmentInheritanceDisabledTestCase, self).config_overrides()
+        self.config_fixture.config(group='os_inherit', enabled=False)
+
+    def test_crud_inherited_role_grants_failed_if_disabled(self):
+        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(role['id'], role)
+
+        base_collection_url = (
+            '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': self.domain_id,
+                'user_id': self.user['id']})
+        member_url = '%(collection_url)s/%(role_id)s/inherited_to_projects' % {
+            'collection_url': base_collection_url,
+            'role_id': role['id']}
+        collection_url = base_collection_url + '/inherited_to_projects'
+
+        self.put(member_url, expected_status=404)
+        self.head(member_url, expected_status=404)
+        self.get(collection_url, expected_status=404)
+        self.delete(member_url, expected_status=404)
+
+
+class AssignmentV3toV2MethodsTestCase(tests.TestCase):
+    """Test domain V3 to V2 conversion methods."""
+
+    def test_v2controller_filter_domain_id(self):
+        # V2.0 is not domain-aware; ensure domain_id is popped off the ref.
+        other_data = uuid.uuid4().hex
+        domain_id = uuid.uuid4().hex
+        ref = {'domain_id': domain_id,
+               'other_data': other_data}
+
+        ref_no_domain = {'other_data': other_data}
+        expected_ref = ref_no_domain.copy()
+
+        updated_ref = controller.V2Controller.filter_domain_id(ref)
+        self.assertIs(ref, updated_ref)
+        self.assertDictEqual(ref, expected_ref)
+        # Make sure we don't error/muck up data if domain_id isn't present
+        updated_ref = controller.V2Controller.filter_domain_id(ref_no_domain)
+        self.assertIs(ref_no_domain, updated_ref)
+        self.assertDictEqual(ref_no_domain, expected_ref)
+
+    def test_v3controller_filter_domain_id(self):
+        # No data should be filtered out in this case.
+        other_data = uuid.uuid4().hex
+        domain_id = uuid.uuid4().hex
+        ref = {'domain_id': domain_id,
+               'other_data': other_data}
+
+        expected_ref = ref.copy()
+        updated_ref = controller.V3Controller.filter_domain_id(ref)
+        self.assertIs(ref, updated_ref)
+        self.assertDictEqual(ref, expected_ref)
+
+    def test_v2controller_filter_domain(self):
+        other_data = uuid.uuid4().hex
+        domain_id = uuid.uuid4().hex
+        non_default_domain_ref = {'domain': {'id': domain_id},
+                                  'other_data': other_data}
+        default_domain_ref = {'domain': {'id': 'default'},
+                              'other_data': other_data}
+        updated_ref = controller.V2Controller.filter_domain(default_domain_ref)
+        self.assertNotIn('domain', updated_ref)
+        self.assertRaises(exception.Unauthorized,
+                          controller.V2Controller.filter_domain,
+                          non_default_domain_ref)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_auth.py b/keystone-moon/keystone/tests/unit/test_v3_auth.py
new file mode 100644 (file)
index 0000000..ec07917
--- /dev/null
@@ -0,0 +1,4494 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import datetime
+import json
+import operator
+import uuid
+
+from keystoneclient.common import cms
+import mock
+from oslo_config import cfg
+from oslo_utils import timeutils
+import six
+from testtools import matchers
+from testtools import testcase
+
+from keystone import auth
+from keystone import exception
+from keystone.policy.backends import rules
+from keystone.tests import unit as tests
+from keystone.tests.unit import ksfixtures
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+
+
+class TestAuthInfo(test_v3.AuthTestMixin, testcase.TestCase):
+    def setUp(self):
+        super(TestAuthInfo, self).setUp()
+        auth.controllers.load_auth_methods()
+
+    def test_missing_auth_methods(self):
+        auth_data = {'identity': {}}
+        auth_data['identity']['token'] = {'id': uuid.uuid4().hex}
+        self.assertRaises(exception.ValidationError,
+                          auth.controllers.AuthInfo.create,
+                          None,
+                          auth_data)
+
+    def test_unsupported_auth_method(self):
+        auth_data = {'methods': ['abc']}
+        auth_data['abc'] = {'test': 'test'}
+        auth_data = {'identity': auth_data}
+        self.assertRaises(exception.AuthMethodNotSupported,
+                          auth.controllers.AuthInfo.create,
+                          None,
+                          auth_data)
+
+    def test_missing_auth_method_data(self):
+        auth_data = {'methods': ['password']}
+        auth_data = {'identity': auth_data}
+        self.assertRaises(exception.ValidationError,
+                          auth.controllers.AuthInfo.create,
+                          None,
+                          auth_data)
+
+    def test_project_name_no_domain(self):
+        auth_data = self.build_authentication_request(
+            username='test',
+            password='test',
+            project_name='abc')['auth']
+        self.assertRaises(exception.ValidationError,
+                          auth.controllers.AuthInfo.create,
+                          None,
+                          auth_data)
+
+    def test_both_project_and_domain_in_scope(self):
+        auth_data = self.build_authentication_request(
+            user_id='test',
+            password='test',
+            project_name='test',
+            domain_name='test')['auth']
+        self.assertRaises(exception.ValidationError,
+                          auth.controllers.AuthInfo.create,
+                          None,
+                          auth_data)
+
+    def test_get_method_names_duplicates(self):
+        auth_data = self.build_authentication_request(
+            token='test',
+            user_id='test',
+            password='test')['auth']
+        auth_data['identity']['methods'] = ['password', 'token',
+                                            'password', 'password']
+        context = None
+        auth_info = auth.controllers.AuthInfo.create(context, auth_data)
+        self.assertEqual(auth_info.get_method_names(),
+                         ['password', 'token'])
+
+    def test_get_method_data_invalid_method(self):
+        auth_data = self.build_authentication_request(
+            user_id='test',
+            password='test')['auth']
+        context = None
+        auth_info = auth.controllers.AuthInfo.create(context, auth_data)
+
+        method_name = uuid.uuid4().hex
+        self.assertRaises(exception.ValidationError,
+                          auth_info.get_method_data,
+                          method_name)
+
+
+class TokenAPITests(object):
+    # Why is this not just setUp? Because TokenAPITests is not a test class
+    # itself. If TokenAPITests were a subclass of the testcase, it would get
+    # picked up by the enumerate-tests-in-file code. And the way methods are
+    # resolved under Python's multiple inheritance means a setUp defined
+    # here would be skipped by the test runner.
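+    # For example (pattern taken from the concrete classes later in this
+    # file): TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests) picks
+    # up setUp from RestfulTestCase first in the MRO, and then calls
+    # self.doSetUp() explicitly from its own setUp().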
+    def doSetUp(self):
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_id=self.domain_id,
+            password=self.user['password'])
+        resp = self.v3_authenticate_token(auth_data)
+        self.token_data = resp.result
+        self.token = resp.headers.get('X-Subject-Token')
+        self.headers = {'X-Subject-Token': resp.headers.get('X-Subject-Token')}
+
+    def test_default_fixture_scope_token(self):
+        self.assertIsNotNone(self.get_scoped_token())
+
+    def verify_token(self, *args, **kwargs):
+        return cms.verify_token(*args, **kwargs)
+
+    def test_v3_token_id(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        resp = self.v3_authenticate_token(auth_data)
+        token_data = resp.result
+        token_id = resp.headers.get('X-Subject-Token')
+        self.assertIn('expires_at', token_data['token'])
+
+        decoded_token = self.verify_token(token_id, CONF.signing.certfile,
+                                          CONF.signing.ca_certs)
+        decoded_token_dict = json.loads(decoded_token)
+
+        token_resp_dict = json.loads(resp.body)
+
+        self.assertEqual(decoded_token_dict, token_resp_dict)
+        # should be able to validate a hashed PKI token as well
+        hash_token_id = cms.cms_hash_token(token_id)
+        headers = {'X-Subject-Token': hash_token_id}
+        resp = self.get('/auth/tokens', headers=headers)
+        expected_token_data = resp.result
+        self.assertDictEqual(expected_token_data, token_data)
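+        # The hashed form works because cms.cms_hash_token() reduces the
+        # long ASN.1 PKI blob to a short hex digest (MD5 by default in
+        # keystoneclient); keystone can look the token up by that digest,
+        # so the hash stands in for the full token.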
+
+    def test_v3_v2_intermix_non_default_domain_failed(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        token = self.get_requested_token(auth_data)
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % (token)
+        self.admin_request(path=path,
+                           token='ADMIN',
+                           method='GET',
+                           expected_status=401)
+
+    def test_v3_v2_intermix_new_default_domain(self):
+        # If the default_domain_id config option is changed, it should be
+        # possible to validate a v3 token for a user in the new domain.
+
+        # 1) Create a new domain for the user.
+        new_domain_id = uuid.uuid4().hex
+        new_domain = {
+            'description': uuid.uuid4().hex,
+            'enabled': True,
+            'id': new_domain_id,
+            'name': uuid.uuid4().hex,
+        }
+
+        self.resource_api.create_domain(new_domain_id, new_domain)
+
+        # 2) Create user in new domain.
+        new_user_password = uuid.uuid4().hex
+        new_user = {
+            'name': uuid.uuid4().hex,
+            'domain_id': new_domain_id,
+            'password': new_user_password,
+            'email': uuid.uuid4().hex,
+        }
+
+        new_user = self.identity_api.create_user(new_user)
+
+        # 3) Update the default_domain_id config option to the new domain
+
+        self.config_fixture.config(group='identity',
+                                   default_domain_id=new_domain_id)
+
+        # 4) Get a token using v3 api.
+
+        auth_data = self.build_authentication_request(
+            user_id=new_user['id'],
+            password=new_user_password)
+        token = self.get_requested_token(auth_data)
+
+        # 5) Authenticate token using v2 api.
+
+        path = '/v2.0/tokens/%s' % (token)
+        self.admin_request(path=path,
+                           token='ADMIN',
+                           method='GET')
+
+    def test_v3_v2_intermix_domain_scoped_token_failed(self):
+        # grant the domain role to user
+        path = '/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id'])
+        self.put(path=path)
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            domain_id=self.domain['id'])
+        token = self.get_requested_token(auth_data)
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % (token)
+        self.admin_request(path=path,
+                           token='ADMIN',
+                           method='GET',
+                           expected_status=401)
+
+    def test_v3_v2_intermix_non_default_project_failed(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            project_id=self.project['id'])
+        token = self.get_requested_token(auth_data)
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % (token)
+        self.admin_request(path=path,
+                           token='ADMIN',
+                           method='GET',
+                           expected_status=401)
+
+    def test_v3_v2_unscoped_token_intermix(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'])
+        resp = self.v3_authenticate_token(auth_data)
+        token_data = resp.result
+        token = resp.headers.get('X-Subject-Token')
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % (token)
+        resp = self.admin_request(path=path,
+                                  token='ADMIN',
+                                  method='GET')
+        v2_token = resp.result
+        self.assertEqual(v2_token['access']['user']['id'],
+                         token_data['token']['user']['id'])
+        # v2 token times have no sub-second precision, so just make sure
+        # the non-fractional part agrees
+        self.assertIn(v2_token['access']['token']['expires'][:-1],
+                      token_data['token']['expires_at'])
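+        # Illustration (timestamp formats assumed from typical keystone
+        # output): v2 'expires' looks like '2015-06-30T16:47:29Z', while
+        # v3 'expires_at' looks like '2015-06-30T16:47:29.000000Z', so the
+        # v2 value minus its trailing 'Z' is a substring of the v3 value.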
+
+    def test_v3_v2_token_intermix(self):
+        # FIXME(gyee): PKI tokens are not interchangeable because token
+        # data is baked into the token itself.
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            project_id=self.default_domain_project['id'])
+        resp = self.v3_authenticate_token(auth_data)
+        token_data = resp.result
+        token = resp.headers.get('X-Subject-Token')
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % (token)
+        resp = self.admin_request(path=path,
+                                  token='ADMIN',
+                                  method='GET')
+        v2_token = resp.result
+        self.assertEqual(v2_token['access']['user']['id'],
+                         token_data['token']['user']['id'])
+        # v2 token times have no sub-second precision, so just make sure
+        # the non-fractional part agrees
+        self.assertIn(v2_token['access']['token']['expires'][:-1],
+                      token_data['token']['expires_at'])
+        self.assertEqual(v2_token['access']['user']['roles'][0]['id'],
+                         token_data['token']['roles'][0]['id'])
+
+    def test_v3_v2_hashed_pki_token_intermix(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            project_id=self.default_domain_project['id'])
+        resp = self.v3_authenticate_token(auth_data)
+        token_data = resp.result
+        token = resp.headers.get('X-Subject-Token')
+
+        # should be able to validate a hashed PKI token in v2 too
+        token = cms.cms_hash_token(token)
+        path = '/v2.0/tokens/%s' % (token)
+        resp = self.admin_request(path=path,
+                                  token='ADMIN',
+                                  method='GET')
+        v2_token = resp.result
+        self.assertEqual(v2_token['access']['user']['id'],
+                         token_data['token']['user']['id'])
+        # v2 token times have no sub-second precision, so just make sure
+        # the non-fractional part agrees
+        self.assertIn(v2_token['access']['token']['expires'][:-1],
+                      token_data['token']['expires_at'])
+        self.assertEqual(v2_token['access']['user']['roles'][0]['id'],
+                         token_data['token']['roles'][0]['id'])
+
+    def test_v2_v3_unscoped_token_intermix(self):
+        body = {
+            'auth': {
+                'passwordCredentials': {
+                    'userId': self.user['id'],
+                    'password': self.user['password']
+                }
+            }}
+        resp = self.admin_request(path='/v2.0/tokens',
+                                  method='POST',
+                                  body=body)
+        v2_token_data = resp.result
+        v2_token = v2_token_data['access']['token']['id']
+        headers = {'X-Subject-Token': v2_token}
+        resp = self.get('/auth/tokens', headers=headers)
+        token_data = resp.result
+        self.assertEqual(v2_token_data['access']['user']['id'],
+                         token_data['token']['user']['id'])
+        # v2 token times have no sub-second precision, so just make sure
+        # the non-fractional part agrees
+        self.assertIn(v2_token_data['access']['token']['expires'][:-1],
+                      token_data['token']['expires_at'])
+
+    def test_v2_v3_token_intermix(self):
+        body = {
+            'auth': {
+                'passwordCredentials': {
+                    'userId': self.user['id'],
+                    'password': self.user['password']
+                },
+                'tenantId': self.project['id']
+            }}
+        resp = self.admin_request(path='/v2.0/tokens',
+                                  method='POST',
+                                  body=body)
+        v2_token_data = resp.result
+        v2_token = v2_token_data['access']['token']['id']
+        headers = {'X-Subject-Token': v2_token}
+        resp = self.get('/auth/tokens', headers=headers)
+        token_data = resp.result
+        self.assertEqual(v2_token_data['access']['user']['id'],
+                         token_data['token']['user']['id'])
+        # v2 token times have no sub-second precision, so just make sure
+        # the non-fractional part agrees
+        self.assertIn(v2_token_data['access']['token']['expires'][:-1],
+                      token_data['token']['expires_at'])
+        self.assertEqual(v2_token_data['access']['user']['roles'][0]['name'],
+                         token_data['token']['roles'][0]['name'])
+
+        v2_issued_at = timeutils.parse_isotime(
+            v2_token_data['access']['token']['issued_at'])
+        v3_issued_at = timeutils.parse_isotime(
+            token_data['token']['issued_at'])
+
+        self.assertEqual(v2_issued_at, v3_issued_at)
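+        # oslo_utils.timeutils.parse_isotime() returns timezone-aware
+        # datetimes, so this equality holds even if the two ISO 8601
+        # strings are formatted differently.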
+
+    def test_rescoping_token(self):
+        expires = self.token_data['token']['expires_at']
+        auth_data = self.build_authentication_request(
+            token=self.token,
+            project_id=self.project_id)
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectScopedTokenResponse(r)
+        # make sure expires stayed the same
+        self.assertEqual(expires, r.result['token']['expires_at'])
+
+    def test_check_token(self):
+        self.head('/auth/tokens', headers=self.headers, expected_status=200)
+
+    def test_validate_token(self):
+        r = self.get('/auth/tokens', headers=self.headers)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def test_validate_token_nocatalog(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        headers = {'X-Subject-Token': self.get_requested_token(auth_data)}
+        r = self.get('/auth/tokens?nocatalog', headers=headers)
+        self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
+
+
+class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
+    def config_overrides(self):
+        super(AllowRescopeScopedTokenDisabledTests, self).config_overrides()
+        self.config_fixture.config(
+            group='token',
+            allow_rescope_scoped_token=False)
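+        # With allow_rescope_scoped_token disabled, only unscoped tokens
+        # may be exchanged for scoped ones; rescoping an already-scoped
+        # token (v2 or v3) is rejected with 403, as the tests below verify.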
+
+    def test_rescoping_v3_to_v3_disabled(self):
+        self.v3_authenticate_token(
+            self.build_authentication_request(
+                token=self.get_scoped_token(),
+                project_id=self.project_id),
+            expected_status=403)
+
+    def _v2_token(self):
+        body = {
+            'auth': {
+                "tenantId": self.project['id'],
+                'passwordCredentials': {
+                    'userId': self.user['id'],
+                    'password': self.user['password']
+                }
+            }}
+        resp = self.admin_request(path='/v2.0/tokens',
+                                  method='POST',
+                                  body=body)
+        v2_token_data = resp.result
+        return v2_token_data
+
+    def _v2_token_from_token(self, token):
+        body = {
+            'auth': {
+                "tenantId": self.project['id'],
+                "token": token
+            }}
+        self.admin_request(path='/v2.0/tokens',
+                           method='POST',
+                           body=body,
+                           expected_status=403)
+
+    def test_rescoping_v2_to_v3_disabled(self):
+        token = self._v2_token()
+        self.v3_authenticate_token(
+            self.build_authentication_request(
+                token=token['access']['token']['id'],
+                project_id=self.project_id),
+            expected_status=403)
+
+    def test_rescoping_v3_to_v2_disabled(self):
+        token = {'id': self.get_scoped_token()}
+        self._v2_token_from_token(token)
+
+    def test_rescoping_v2_to_v2_disabled(self):
+        token = self._v2_token()
+        self._v2_token_from_token(token['access']['token'])
+
+    def test_rescoped_domain_token_disabled(self):
+        self.domainA = self.new_domain_ref()
+        self.assignment_api.create_domain(self.domainA['id'], self.domainA)
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user['id'],
+                                         domain_id=self.domainA['id'])
+        unscoped_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.user['id'],
+                password=self.user['password']))
+        # Get a domain-scoped token from the unscoped token
+        domain_scoped_token = self.get_requested_token(
+            self.build_authentication_request(
+                token=unscoped_token,
+                domain_id=self.domainA['id']))
+        self.v3_authenticate_token(
+            self.build_authentication_request(
+                token=domain_scoped_token,
+                project_id=self.project_id),
+            expected_status=403)
+
+
+class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
+    def config_overrides(self):
+        super(TestPKITokenAPIs, self).config_overrides()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pki.Provider')
+
+    def setUp(self):
+        super(TestPKITokenAPIs, self).setUp()
+        self.doSetUp()
+
+
+class TestPKIZTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
+
+    def verify_token(self, *args, **kwargs):
+        return cms.pkiz_verify(*args, **kwargs)
+
+    def config_overrides(self):
+        super(TestPKIZTokenAPIs, self).config_overrides()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pkiz.Provider')
+
+    def setUp(self):
+        super(TestPKIZTokenAPIs, self).setUp()
+        self.doSetUp()
+
+
+class TestUUIDTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
+    def config_overrides(self):
+        super(TestUUIDTokenAPIs, self).config_overrides()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.uuid.Provider')
+
+    def setUp(self):
+        super(TestUUIDTokenAPIs, self).setUp()
+        self.doSetUp()
+
+    def test_v3_token_id(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        resp = self.v3_authenticate_token(auth_data)
+        token_data = resp.result
+        token_id = resp.headers.get('X-Subject-Token')
+        self.assertIn('expires_at', token_data['token'])
+        self.assertFalse(cms.is_asn1_token(token_id))
+
+    def test_v3_v2_hashed_pki_token_intermix(self):
+        # this test is only applicable to PKI tokens,
+        # so we skip it for UUID tokens
+        pass
+
+
+class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase):
+    """Test token revoke using v3 Identity API by token owner and admin."""
+
+    def load_sample_data(self):
+        """Load Sample Data for Test Cases.
+
+        Two domains, domainA and domainB
+        Two users in domainA, userNormalA and userAdminA
+        One user in domainB, userAdminB
+
+        """
+        super(TestTokenRevokeSelfAndAdmin, self).load_sample_data()
+        # DomainA setup
+        self.domainA = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainA['id'], self.domainA)
+
+        self.userAdminA = self.new_user_ref(domain_id=self.domainA['id'])
+        password = self.userAdminA['password']
+        self.userAdminA = self.identity_api.create_user(self.userAdminA)
+        self.userAdminA['password'] = password
+
+        self.userNormalA = self.new_user_ref(
+            domain_id=self.domainA['id'])
+        password = self.userNormalA['password']
+        self.userNormalA = self.identity_api.create_user(self.userNormalA)
+        self.userNormalA['password'] = password
+
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.userAdminA['id'],
+                                         domain_id=self.domainA['id'])
+
+    def config_overrides(self):
+        super(TestTokenRevokeSelfAndAdmin, self).config_overrides()
+        self.config_fixture.config(
+            group='oslo_policy',
+            policy_file=tests.dirs.etc('policy.v3cloudsample.json'))
+
+    def test_user_revokes_own_token(self):
+        user_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.userNormalA['id'],
+                password=self.userNormalA['password'],
+                user_domain_id=self.domainA['id']))
+        self.assertNotEmpty(user_token)
+        headers = {'X-Subject-Token': user_token}
+
+        adminA_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.userAdminA['id'],
+                password=self.userAdminA['password'],
+                domain_name=self.domainA['name']))
+
+        self.head('/auth/tokens', headers=headers, expected_status=200,
+                  token=adminA_token)
+        self.head('/auth/tokens', headers=headers, expected_status=200,
+                  token=user_token)
+        self.delete('/auth/tokens', headers=headers, expected_status=204,
+                    token=user_token)
+        # invalid X-Auth-Token and invalid X-Subject-Token (401)
+        self.head('/auth/tokens', headers=headers, expected_status=401,
+                  token=user_token)
+        # invalid X-Auth-Token and invalid X-Subject-Token (401)
+        self.delete('/auth/tokens', headers=headers, expected_status=401,
+                    token=user_token)
+        # valid X-Auth-Token and invalid X-Subject-Token (404)
+        self.delete('/auth/tokens', headers=headers, expected_status=404,
+                    token=adminA_token)
+        # valid X-Auth-Token and invalid X-Subject-Token (404)
+        self.head('/auth/tokens', headers=headers, expected_status=404,
+                  token=adminA_token)
+
+    def test_adminA_revokes_userA_token(self):
+        user_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.userNormalA['id'],
+                password=self.userNormalA['password'],
+                user_domain_id=self.domainA['id']))
+        self.assertNotEmpty(user_token)
+        headers = {'X-Subject-Token': user_token}
+
+        adminA_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.userAdminA['id'],
+                password=self.userAdminA['password'],
+                domain_name=self.domainA['name']))
+
+        self.head('/auth/tokens', headers=headers, expected_status=200,
+                  token=adminA_token)
+        self.head('/auth/tokens', headers=headers, expected_status=200,
+                  token=user_token)
+        self.delete('/auth/tokens', headers=headers, expected_status=204,
+                    token=adminA_token)
+        # invalid X-Auth-Token and invalid X-Subject-Token (401)
+        self.head('/auth/tokens', headers=headers, expected_status=401,
+                  token=user_token)
+        # valid X-Auth-Token and invalid X-Subject-Token (404)
+        self.delete('/auth/tokens', headers=headers, expected_status=404,
+                    token=adminA_token)
+        # valid X-Auth-Token and invalid X-Subject-Token (404)
+        self.head('/auth/tokens', headers=headers, expected_status=404,
+                  token=adminA_token)
+
+    def test_adminB_fails_revoking_userA_token(self):
+        # DomainB setup
+        self.domainB = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainB['id'], self.domainB)
+        self.userAdminB = self.new_user_ref(domain_id=self.domainB['id'])
+        password = self.userAdminB['password']
+        self.userAdminB = self.identity_api.create_user(self.userAdminB)
+        self.userAdminB['password'] = password
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.userAdminB['id'],
+                                         domain_id=self.domainB['id'])
+
+        user_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.userNormalA['id'],
+                password=self.userNormalA['password'],
+                user_domain_id=self.domainA['id']))
+        headers = {'X-Subject-Token': user_token}
+
+        adminB_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.userAdminB['id'],
+                password=self.userAdminB['password'],
+                domain_name=self.domainB['name']))
+
+        self.head('/auth/tokens', headers=headers, expected_status=403,
+                  token=adminB_token)
+        self.delete('/auth/tokens', headers=headers, expected_status=403,
+                    token=adminB_token)
+
+
+class TestTokenRevokeById(test_v3.RestfulTestCase):
+    """Test token revocation on the v3 Identity API."""
+
+    def config_overrides(self):
+        super(TestTokenRevokeById, self).config_overrides()
+        self.config_fixture.config(
+            group='revoke',
+            driver='keystone.contrib.revoke.backends.kvs.Revoke')
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pki.Provider',
+            revoke_by_id=False)
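+        # With revoke_by_id disabled, validation consults revocation
+        # events (the KVS revoke driver configured above) rather than a
+        # persisted list of individually revoked token ids.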
+
+    def setUp(self):
+        """Setup for Token Revoking Test Cases.
+
+        As well as the usual housekeeping, create a set of domains,
+        users, groups, roles and projects for the subsequent tests:
+
+        - Two domains: A & B
+        - Three users (1, 2 and 3)
+        - Three groups (1, 2 and 3)
+        - Two roles (1 and 2)
+        - DomainA owns user1, domainB owns user2 and user3
+        - DomainA owns group1 and group2, domainB owns group3
+        - User1 and user2 are members of group1
+        - User3 is a member of group2
+        - Two projects: A & B, both in domainA
+        - Group1 has role1 on Project A and B, meaning that user1 and user2
+          will get role1 on both projects by virtue of membership
+        - User1, 2 and 3 have role1 assigned to projectA (for user1 and
+          user2 this duplicates the grant they get via group1)
+        - User1 has role2 assigned to domainA
+
+        """
+        super(TestTokenRevokeById, self).setUp()
+
+        # Start by creating a couple of domains and projects
+        self.domainA = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainA['id'], self.domainA)
+        self.domainB = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainB['id'], self.domainB)
+        self.projectA = self.new_project_ref(domain_id=self.domainA['id'])
+        self.resource_api.create_project(self.projectA['id'], self.projectA)
+        self.projectB = self.new_project_ref(domain_id=self.domainA['id'])
+        self.resource_api.create_project(self.projectB['id'], self.projectB)
+
+        # Now create some users
+        self.user1 = self.new_user_ref(
+            domain_id=self.domainA['id'])
+        password = self.user1['password']
+        self.user1 = self.identity_api.create_user(self.user1)
+        self.user1['password'] = password
+
+        self.user2 = self.new_user_ref(
+            domain_id=self.domainB['id'])
+        password = self.user2['password']
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+
+        self.user3 = self.new_user_ref(
+            domain_id=self.domainB['id'])
+        password = self.user3['password']
+        self.user3 = self.identity_api.create_user(self.user3)
+        self.user3['password'] = password
+
+        self.group1 = self.new_group_ref(
+            domain_id=self.domainA['id'])
+        self.group1 = self.identity_api.create_group(self.group1)
+
+        self.group2 = self.new_group_ref(
+            domain_id=self.domainA['id'])
+        self.group2 = self.identity_api.create_group(self.group2)
+
+        self.group3 = self.new_group_ref(
+            domain_id=self.domainB['id'])
+        self.group3 = self.identity_api.create_group(self.group3)
+
+        self.identity_api.add_user_to_group(self.user1['id'],
+                                            self.group1['id'])
+        self.identity_api.add_user_to_group(self.user2['id'],
+                                            self.group1['id'])
+        self.identity_api.add_user_to_group(self.user3['id'],
+                                            self.group2['id'])
+
+        self.role1 = self.new_role_ref()
+        self.role_api.create_role(self.role1['id'], self.role1)
+        self.role2 = self.new_role_ref()
+        self.role_api.create_role(self.role2['id'], self.role2)
+
+        self.assignment_api.create_grant(self.role2['id'],
+                                         user_id=self.user1['id'],
+                                         domain_id=self.domainA['id'])
+        self.assignment_api.create_grant(self.role1['id'],
+                                         user_id=self.user1['id'],
+                                         project_id=self.projectA['id'])
+        self.assignment_api.create_grant(self.role1['id'],
+                                         user_id=self.user2['id'],
+                                         project_id=self.projectA['id'])
+        self.assignment_api.create_grant(self.role1['id'],
+                                         user_id=self.user3['id'],
+                                         project_id=self.projectA['id'])
+        self.assignment_api.create_grant(self.role1['id'],
+                                         group_id=self.group1['id'],
+                                         project_id=self.projectA['id'])
+
+    def test_unscoped_token_remains_valid_after_role_assignment(self):
+        unscoped_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.user1['id'],
+                password=self.user1['password']))
+
+        scoped_token = self.get_requested_token(
+            self.build_authentication_request(
+                token=unscoped_token,
+                project_id=self.projectA['id']))
+
+        # confirm both tokens are valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': unscoped_token},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': scoped_token},
+                  expected_status=200)
+
+        # create a new role
+        role = self.new_role_ref()
+        self.role_api.create_role(role['id'], role)
+
+        # assign a new role
+        self.put(
+            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
+                'project_id': self.projectA['id'],
+                'user_id': self.user1['id'],
+                'role_id': role['id']})
+
+        # both tokens should remain valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': unscoped_token},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': scoped_token},
+                  expected_status=200)
+
+    def test_deleting_user_grant_revokes_token(self):
+        """Test deleting a user grant revokes token.
+
+        Test Plan:
+
+        - Get a token for user1, scoped to ProjectA
+        - Delete the grant user1 has on ProjectA
+        - Check token is no longer valid
+
+        """
+        auth_data = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            project_id=self.projectA['id'])
+        token = self.get_requested_token(auth_data)
+        # Confirm token is valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=200)
+        # Delete the grant, which should invalidate the token
+        grant_url = (
+            '/projects/%(project_id)s/users/%(user_id)s/'
+            'roles/%(role_id)s' % {
+                'project_id': self.projectA['id'],
+                'user_id': self.user1['id'],
+                'role_id': self.role1['id']})
+        self.delete(grant_url)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=404)
+
+    def role_data_fixtures(self):
+        self.projectC = self.new_project_ref(domain_id=self.domainA['id'])
+        self.resource_api.create_project(self.projectC['id'], self.projectC)
+        self.user4 = self.new_user_ref(domain_id=self.domainB['id'])
+        password = self.user4['password']
+        self.user4 = self.identity_api.create_user(self.user4)
+        self.user4['password'] = password
+        self.user5 = self.new_user_ref(
+            domain_id=self.domainA['id'])
+        password = self.user5['password']
+        self.user5 = self.identity_api.create_user(self.user5)
+        self.user5['password'] = password
+        self.user6 = self.new_user_ref(
+            domain_id=self.domainA['id'])
+        password = self.user6['password']
+        self.user6 = self.identity_api.create_user(self.user6)
+        self.user6['password'] = password
+        self.identity_api.add_user_to_group(self.user5['id'],
+                                            self.group1['id'])
+        self.assignment_api.create_grant(self.role1['id'],
+                                         group_id=self.group1['id'],
+                                         project_id=self.projectB['id'])
+        self.assignment_api.create_grant(self.role2['id'],
+                                         user_id=self.user4['id'],
+                                         project_id=self.projectC['id'])
+        self.assignment_api.create_grant(self.role1['id'],
+                                         user_id=self.user6['id'],
+                                         project_id=self.projectA['id'])
+        self.assignment_api.create_grant(self.role1['id'],
+                                         user_id=self.user6['id'],
+                                         domain_id=self.domainA['id'])
+
+    def test_deleting_role_revokes_token(self):
+        """Test deleting a role revokes token.
+
+            Add some additional test data, namely:
+             - A third project (project C)
+             - Three additional users - user4 owned by domainB and user5 and 6
+               owned by domainA (different domain ownership should not affect
+               the test results, just provided to broaden test coverage)
+             - User5 is a member of group1
+             - Group1 gets an additional assignment - role1 on projectB as
+               well as its existing role1 on projectA
+             - User4 has role2 on Project C
+             - User6 has role1 on projectA and domainA
+             - This allows us to create 5 tokens by virtue of different types
+               of role assignment:
+               - user1, scoped to ProjectA by virtue of user role1 assignment
+               - user5, scoped to ProjectB by virtue of group role1 assignment
+               - user4, scoped to ProjectC by virtue of user role2 assignment
+               - user6, scoped to ProjectA by virtue of user role1 assignment
+               - user6, scoped to DomainA by virtue of user role1 assignment
+             - role1 is then deleted
+             - Check that the tokens scoped to ProjectA, ProjectB and
+               DomainA are revoked, but not the one scoped to ProjectC
+
+        """
+
+        self.role_data_fixtures()
+
+        # Now we are ready to start issuing requests
+        auth_data = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            project_id=self.projectA['id'])
+        tokenA = self.get_requested_token(auth_data)
+        auth_data = self.build_authentication_request(
+            user_id=self.user5['id'],
+            password=self.user5['password'],
+            project_id=self.projectB['id'])
+        tokenB = self.get_requested_token(auth_data)
+        auth_data = self.build_authentication_request(
+            user_id=self.user4['id'],
+            password=self.user4['password'],
+            project_id=self.projectC['id'])
+        tokenC = self.get_requested_token(auth_data)
+        auth_data = self.build_authentication_request(
+            user_id=self.user6['id'],
+            password=self.user6['password'],
+            project_id=self.projectA['id'])
+        tokenD = self.get_requested_token(auth_data)
+        auth_data = self.build_authentication_request(
+            user_id=self.user6['id'],
+            password=self.user6['password'],
+            domain_id=self.domainA['id'])
+        tokenE = self.get_requested_token(auth_data)
+        # Confirm tokens are valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenA},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenB},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenC},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenD},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenE},
+                  expected_status=200)
+
+        # Delete the role, which should invalidate the tokens
+        role_url = '/roles/%s' % self.role1['id']
+        self.delete(role_url)
+
+        # Check that the tokens which relied on role1 are now invalid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenA},
+                  expected_status=404)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenB},
+                  expected_status=404)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenD},
+                  expected_status=404)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenE},
+                  expected_status=404)
+
+        # ...but the one using role2 is still valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': tokenC},
+                  expected_status=200)
+
+    def test_domain_user_role_assignment_maintains_token(self):
+        """Test user-domain role assignment maintains existing token.
+
+        Test Plan:
+
+        - Get a token for user1, scoped to ProjectA
+        - Create a grant for user1 on DomainB
+        - Check token is still valid
+
+        """
+        auth_data = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            project_id=self.projectA['id'])
+        token = self.get_requested_token(auth_data)
+        # Confirm token is valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=200)
+        # Assign a role, which should not affect the token
+        grant_url = (
+            '/domains/%(domain_id)s/users/%(user_id)s/'
+            'roles/%(role_id)s' % {
+                'domain_id': self.domainB['id'],
+                'user_id': self.user1['id'],
+                'role_id': self.role1['id']})
+        self.put(grant_url)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=200)
+
+    def test_disabling_project_revokes_token(self):
+        token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.user3['id'],
+                password=self.user3['password'],
+                project_id=self.projectA['id']))
+
+        # confirm token is valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=200)
+
+        # disable the project, which should invalidate the token
+        self.patch(
+            '/projects/%(project_id)s' % {'project_id': self.projectA['id']},
+            body={'project': {'enabled': False}})
+
+        # user should no longer have access to the project
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=404)
+        self.v3_authenticate_token(
+            self.build_authentication_request(
+                user_id=self.user3['id'],
+                password=self.user3['password'],
+                project_id=self.projectA['id']),
+            expected_status=401)
+
+    def test_deleting_project_revokes_token(self):
+        token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.user3['id'],
+                password=self.user3['password'],
+                project_id=self.projectA['id']))
+
+        # confirm token is valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=200)
+
+        # delete the project, which should invalidate the token
+        self.delete(
+            '/projects/%(project_id)s' % {'project_id': self.projectA['id']})
+
+        # user should no longer have access to the project
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=404)
+        self.v3_authenticate_token(
+            self.build_authentication_request(
+                user_id=self.user3['id'],
+                password=self.user3['password'],
+                project_id=self.projectA['id']),
+            expected_status=401)
+
+    def test_deleting_group_grant_revokes_tokens(self):
+        """Test deleting a group grant revokes tokens.
+
+        Test Plan:
+
+        - Get a token for user1, scoped to ProjectA
+        - Get a token for user2, scoped to ProjectA
+        - Get a token for user3, scoped to ProjectA
+        - Delete the grant group1 has on ProjectA
+        - Check tokens for user1 & user2 are no longer valid,
+          since user1 and user2 are members of group1
+        - Check token for user3 is still valid
+
+        """
+        auth_data = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            project_id=self.projectA['id'])
+        token1 = self.get_requested_token(auth_data)
+        auth_data = self.build_authentication_request(
+            user_id=self.user2['id'],
+            password=self.user2['password'],
+            project_id=self.projectA['id'])
+        token2 = self.get_requested_token(auth_data)
+        auth_data = self.build_authentication_request(
+            user_id=self.user3['id'],
+            password=self.user3['password'],
+            project_id=self.projectA['id'])
+        token3 = self.get_requested_token(auth_data)
+        # Confirm tokens are valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token1},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token2},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token3},
+                  expected_status=200)
+        # Delete the group grant, which should invalidate the
+        # tokens for user1 and user2
+        grant_url = (
+            '/projects/%(project_id)s/groups/%(group_id)s/'
+            'roles/%(role_id)s' % {
+                'project_id': self.projectA['id'],
+                'group_id': self.group1['id'],
+                'role_id': self.role1['id']})
+        self.delete(grant_url)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token1},
+                  expected_status=404)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token2},
+                  expected_status=404)
+        # But user3's token should still be valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token3},
+                  expected_status=200)
+
+    def test_domain_group_role_assignment_maintains_token(self):
+        """Test domain-group role assignment maintains existing token.
+
+        Test Plan:
+
+        - Get a token for user1, scoped to ProjectA
+        - Create a grant for group1 on DomainB
+        - Check token is still valid
+
+        """
+        auth_data = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            project_id=self.projectA['id'])
+        token = self.get_requested_token(auth_data)
+        # Confirm token is valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=200)
+        # Create the grant, which should not affect the token
+        grant_url = (
+            '/domains/%(domain_id)s/groups/%(group_id)s/'
+            'roles/%(role_id)s' % {
+                'domain_id': self.domainB['id'],
+                'group_id': self.group1['id'],
+                'role_id': self.role1['id']})
+        self.put(grant_url)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=200)
+
+    def test_group_membership_changes_revokes_token(self):
+        """Test add/removal to/from group revokes token.
+
+        Test Plan:
+
+        - Get a token for user1, scoped to ProjectA
+        - Get a token for user2, scoped to ProjectA
+        - Remove user1 from group1
+        - Check token for user1 is no longer valid
+        - Check token for user2 is still valid, even though
+          user2 is also part of group1
+        - Add user2 to group2
+        - Check token for user2 is still valid; adding a user to a
+          group does not invalidate existing tokens
+
+        """
+        auth_data = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            project_id=self.projectA['id'])
+        token1 = self.get_requested_token(auth_data)
+        auth_data = self.build_authentication_request(
+            user_id=self.user2['id'],
+            password=self.user2['password'],
+            project_id=self.projectA['id'])
+        token2 = self.get_requested_token(auth_data)
+        # Confirm tokens are valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token1},
+                  expected_status=200)
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token2},
+                  expected_status=200)
+        # Remove user1 from group1, which should invalidate
+        # the token
+        self.delete('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group1['id'],
+            'user_id': self.user1['id']})
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token1},
+                  expected_status=404)
+        # But user2's token should still be valid
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token2},
+                  expected_status=200)
+        # Adding user2 to a group should not invalidate token
+        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group2['id'],
+            'user_id': self.user2['id']})
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token2},
+                  expected_status=200)
+
+    def test_removing_role_assignment_does_not_affect_other_users(self):
+        """Revoking a role from one user should not affect other users."""
+        user1_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.user1['id'],
+                password=self.user1['password'],
+                project_id=self.projectA['id']))
+
+        user3_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.user3['id'],
+                password=self.user3['password'],
+                project_id=self.projectA['id']))
+
+        # delete relationships between user1 and projectA from setUp
+        self.delete(
+            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
+                'project_id': self.projectA['id'],
+                'user_id': self.user1['id'],
+                'role_id': self.role1['id']})
+        self.delete(
+            '/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' %
+            {'project_id': self.projectA['id'],
+             'group_id': self.group1['id'],
+             'role_id': self.role1['id']})
+
+        # authorization for the first user should now fail
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': user1_token},
+                  expected_status=404)
+        self.v3_authenticate_token(
+            self.build_authentication_request(
+                user_id=self.user1['id'],
+                password=self.user1['password'],
+                project_id=self.projectA['id']),
+            expected_status=401)
+
+        # authorization for the second user should still succeed
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': user3_token},
+                  expected_status=200)
+        self.v3_authenticate_token(
+            self.build_authentication_request(
+                user_id=self.user3['id'],
+                password=self.user3['password'],
+                project_id=self.projectA['id']))
+
+    def test_deleting_project_deletes_grants(self):
+        # Build the path separately to keep the lines within PEP8 limits
+        role_path = ('/projects/%(project_id)s/users/%(user_id)s/'
+                     'roles/%(role_id)s')
+        role_path = role_path % {'user_id': self.user['id'],
+                                 'project_id': self.projectA['id'],
+                                 'role_id': self.role['id']}
+
+        # grant the user a role on the project
+        self.put(role_path)
+
+        # delete the project, which should remove the roles
+        self.delete(
+            '/projects/%(project_id)s' % {'project_id': self.projectA['id']})
+
+        # Make sure we get a NotFound (404) on a HEAD of that role
+        # assignment.
+        self.head(role_path, expected_status=404)
+
+    def get_v2_token(self, token=None, project_id=None):
+        body = {'auth': {}, }
+
+        if token:
+            body['auth']['token'] = {
+                'id': token
+            }
+        else:
+            body['auth']['passwordCredentials'] = {
+                'username': self.default_domain_user['name'],
+                'password': self.default_domain_user['password'],
+            }
+
+        if project_id:
+            body['auth']['tenantId'] = project_id
+
+        r = self.admin_request(method='POST', path='/v2.0/tokens', body=body)
+        return r.json_body['access']['token']['id']
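+
+    # Usage note: get_v2_token() authenticates with password credentials,
+    # while get_v2_token(token=..., project_id=...) exchanges an existing
+    # token for a tenant-scoped one via the v2 body shapes built above.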
+
+    def test_revoke_v2_token_no_check(self):
+        # Test that a V2 token can be revoked without validating it first.
+
+        token = self.get_v2_token()
+
+        self.delete('/auth/tokens',
+                    headers={'X-Subject-Token': token},
+                    expected_status=204)
+
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=404)
+
+    def test_revoke_token_from_token(self):
+        # Test that a scoped token can be requested from an unscoped token,
+        # the scoped token can be revoked, and the unscoped token remains
+        # valid.
+
+        unscoped_token = self.get_requested_token(
+            self.build_authentication_request(
+                user_id=self.user1['id'],
+                password=self.user1['password']))
+
+        # Get a project-scoped token from the unscoped token
+        project_scoped_token = self.get_requested_token(
+            self.build_authentication_request(
+                token=unscoped_token,
+                project_id=self.projectA['id']))
+
+        # Get a domain-scoped token from the unscoped token
+        domain_scoped_token = self.get_requested_token(
+            self.build_authentication_request(
+                token=unscoped_token,
+                domain_id=self.domainA['id']))
+
+        # revoke the project-scoped token.
+        self.delete('/auth/tokens',
+                    headers={'X-Subject-Token': project_scoped_token},
+                    expected_status=204)
+
+        # The project-scoped token is invalidated.
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': project_scoped_token},
+                  expected_status=404)
+
+        # The unscoped token should still be valid.
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': unscoped_token},
+                  expected_status=200)
+
+        # The domain-scoped token should still be valid.
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': domain_scoped_token},
+                  expected_status=200)
+
+        # revoke the domain-scoped token.
+        self.delete('/auth/tokens',
+                    headers={'X-Subject-Token': domain_scoped_token},
+                    expected_status=204)
+
+        # The domain-scoped token is invalid.
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': domain_scoped_token},
+                  expected_status=404)
+
+        # The unscoped token should still be valid.
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': unscoped_token},
+                  expected_status=200)
+
+    def test_revoke_token_from_token_v2(self):
+        # Test that a scoped token can be requested from an unscoped token,
+        # the scoped token can be revoked, and the unscoped token remains
+        # valid.
+
+        # FIXME(blk-u): This isn't working correctly. The scoped token should
+        # be revoked. See bug 1347318.
+
+        unscoped_token = self.get_v2_token()
+
+        # Get a project-scoped token from the unscoped token
+        project_scoped_token = self.get_v2_token(
+            token=unscoped_token, project_id=self.default_domain_project['id'])
+
+        # revoke the project-scoped token.
+        self.delete('/auth/tokens',
+                    headers={'X-Subject-Token': project_scoped_token},
+                    expected_status=204)
+
+        # The project-scoped token is invalidated.
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': project_scoped_token},
+                  expected_status=404)
+
+        # The unscoped token should still be valid.
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': unscoped_token},
+                  expected_status=200)
+
+
+class TestTokenRevokeApi(TestTokenRevokeById):
+    """Test token revocation on the v3 Identity API."""
+
+    EXTENSION_NAME = 'revoke'
+    EXTENSION_TO_ADD = 'revoke_extension'
+
+    def config_overrides(self):
+        super(TestTokenRevokeApi, self).config_overrides()
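+        # These overrides exercise revocation events: a KVS-backed revoke
+        # driver with PKI tokens and revoke_by_id disabled, so validation
+        # must consult revocation events rather than per-token-id deletion.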
+        self.config_fixture.config(
+            group='revoke',
+            driver='keystone.contrib.revoke.backends.kvs.Revoke')
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pki.Provider',
+            revoke_by_id=False)
+
+    def assertValidDeletedProjectResponse(self, events_response, project_id):
+        events = events_response['events']
+        self.assertEqual(1, len(events))
+        self.assertEqual(project_id, events[0]['project_id'])
+        self.assertIsNotNone(events[0]['issued_before'])
+        self.assertIsNotNone(events_response['links'])
+        del events_response['events'][0]['issued_before']
+        del events_response['links']
+        expected_response = {'events': [{'project_id': project_id}]}
+        self.assertEqual(expected_response, events_response)
+
+    def assertDomainInList(self, events_response, domain_id):
+        events = events_response['events']
+        self.assertEqual(1, len(events))
+        self.assertEqual(domain_id, events[0]['domain_id'])
+        self.assertIsNotNone(events[0]['issued_before'])
+        self.assertIsNotNone(events_response['links'])
+        del events_response['events'][0]['issued_before']
+        del events_response['links']
+        expected_response = {'events': [{'domain_id': domain_id}]}
+        self.assertEqual(expected_response, events_response)
+
+    def assertValidRevokedTokenResponse(self, events_response, **kwargs):
+        events = events_response['events']
+        self.assertEqual(1, len(events))
+        for k, v in six.iteritems(kwargs):
+            self.assertEqual(v, events[0].get(k))
+        self.assertIsNotNone(events[0]['issued_before'])
+        self.assertIsNotNone(events_response['links'])
+        del events_response['events'][0]['issued_before']
+        del events_response['links']
+
+        expected_response = {'events': [kwargs]}
+        self.assertEqual(expected_response, events_response)
+
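+    # A typical /OS-REVOKE/events response validated by the helpers above
+    # looks roughly like this (values illustrative):
+    #   {'events': [{'project_id': '<id>', 'issued_before': '<ISO 8601>'}],
+    #    'links': {...}}
+    # Each helper strips the timing and link metadata before comparing the
+    # remaining event attributes exactly.
+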
+    def test_revoke_token(self):
+        scoped_token = self.get_scoped_token()
+        headers = {'X-Subject-Token': scoped_token}
+        response = self.get('/auth/tokens', headers=headers,
+                            expected_status=200).json_body['token']
+
+        self.delete('/auth/tokens', headers=headers, expected_status=204)
+        self.head('/auth/tokens', headers=headers, expected_status=404)
+        events_response = self.get('/OS-REVOKE/events',
+                                   expected_status=200).json_body
+        self.assertValidRevokedTokenResponse(events_response,
+                                             audit_id=response['audit_ids'][0])
+
+    def test_revoke_v2_token(self):
+        token = self.get_v2_token()
+        headers = {'X-Subject-Token': token}
+        response = self.get('/auth/tokens', headers=headers,
+                            expected_status=200).json_body['token']
+        self.delete('/auth/tokens', headers=headers, expected_status=204)
+        self.head('/auth/tokens', headers=headers, expected_status=404)
+        events_response = self.get('/OS-REVOKE/events',
+                                   expected_status=200).json_body
+
+        self.assertValidRevokedTokenResponse(
+            events_response,
+            audit_id=response['audit_ids'][0])
+
+    def test_revoke_by_id_false_410(self):
+        self.get('/auth/tokens/OS-PKI/revoked', expected_status=410)
+
+    def test_list_delete_project_shows_in_event_list(self):
+        self.role_data_fixtures()
+        events = self.get('/OS-REVOKE/events',
+                          expected_status=200).json_body['events']
+        self.assertEqual([], events)
+        self.delete(
+            '/projects/%(project_id)s' % {'project_id': self.projectA['id']})
+        events_response = self.get('/OS-REVOKE/events',
+                                   expected_status=200).json_body
+
+        self.assertValidDeletedProjectResponse(events_response,
+                                               self.projectA['id'])
+
+    def test_disable_domain_shows_in_event_list(self):
+        events = self.get('/OS-REVOKE/events',
+                          expected_status=200).json_body['events']
+        self.assertEqual([], events)
+        disable_body = {'domain': {'enabled': False}}
+        self.patch(
+            '/domains/%(domain_id)s' % {'domain_id': self.domainA['id']},
+            body=disable_body)
+
+        events_response = self.get('/OS-REVOKE/events',
+                                   expected_status=200).json_body
+
+        self.assertDomainInList(events_response, self.domainA['id'])
+
+    def assertEventDataInList(self, events, **kwargs):
+        found = False
+        for e in events:
+            for key, value in six.iteritems(kwargs):
+                try:
+                    if e[key] != value:
+                        break
+                except KeyError:
+                    # Break the loop and present a nice error instead of
+                    # KeyError
+                    break
+            else:
+                # If the value of the event[key] matches the value of the kwarg
+                # for each item in kwargs, the event was fully matched and
+                # the assertTrue below should succeed.
+                found = True
+        self.assertTrue(found,
+                        'Event with the expected key-value pairs not found '
+                        'in list. Expected: "%(expected)s" Events: '
+                        '"%(events)s"' %
+                        {'expected': ','.join(
+                            ["'%s=%s'" % (k, v) for k, v in six.iteritems(
+                                kwargs)]),
+                         'events': events})
+
+    def test_list_delete_token_shows_in_event_list(self):
+        self.role_data_fixtures()
+        events = self.get('/OS-REVOKE/events',
+                          expected_status=200).json_body['events']
+        self.assertEqual([], events)
+
+        scoped_token = self.get_scoped_token()
+        headers = {'X-Subject-Token': scoped_token}
+        auth_req = self.build_authentication_request(token=scoped_token)
+        response = self.v3_authenticate_token(auth_req)
+        token2 = response.json_body['token']
+        headers2 = {'X-Subject-Token': response.headers['X-Subject-Token']}
+
+        response = self.v3_authenticate_token(auth_req)
+        headers3 = {'X-Subject-Token': response.headers['X-Subject-Token']}
+
+        self.head('/auth/tokens', headers=headers, expected_status=200)
+        self.head('/auth/tokens', headers=headers2, expected_status=200)
+        self.head('/auth/tokens', headers=headers3, expected_status=200)
+
+        self.delete('/auth/tokens', headers=headers, expected_status=204)
+        # NOTE(ayoung): token3 is not deleted explicitly, as it should be
+        # covered by the previous delete.
+        events_response = self.get('/OS-REVOKE/events',
+                                   expected_status=200).json_body
+        events = events_response['events']
+        self.assertEqual(1, len(events))
+        self.assertEventDataInList(
+            events,
+            audit_id=token2['audit_ids'][1])
+        self.head('/auth/tokens', headers=headers, expected_status=404)
+        self.head('/auth/tokens', headers=headers2, expected_status=200)
+        self.head('/auth/tokens', headers=headers3, expected_status=200)
+
+    def test_list_with_filter(self):
+        self.role_data_fixtures()
+        events = self.get('/OS-REVOKE/events',
+                          expected_status=200).json_body['events']
+        self.assertEqual(0, len(events))
+
+        scoped_token = self.get_scoped_token()
+        headers = {'X-Subject-Token': scoped_token}
+        auth = self.build_authentication_request(token=scoped_token)
+        headers2 = {'X-Subject-Token': self.get_requested_token(auth)}
+        self.delete('/auth/tokens', headers=headers, expected_status=204)
+        self.delete('/auth/tokens', headers=headers2, expected_status=204)
+
+        events = self.get('/OS-REVOKE/events',
+                          expected_status=200).json_body['events']
+
+        self.assertEqual(2, len(events))
+        future = timeutils.isotime(timeutils.utcnow() +
+                                   datetime.timedelta(seconds=1000))
+
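+        # Events are filtered by issue time, so a 'since' timestamp in the
+        # future should match nothing.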
+        events = self.get('/OS-REVOKE/events?since=%s' % future,
+                          expected_status=200).json_body['events']
+        self.assertEqual(0, len(events))
+
+
+class TestAuthExternalDisabled(test_v3.RestfulTestCase):
+    def config_overrides(self):
+        super(TestAuthExternalDisabled, self).config_overrides()
+        self.config_fixture.config(
+            group='auth',
+            methods=['password', 'token'])
+
+    def test_remote_user_disabled(self):
+        api = auth.controllers.Auth()
+        remote_user = '%s@%s' % (self.user['name'], self.domain['name'])
+        context, auth_info, auth_context = self.build_external_auth_request(
+            remote_user)
+        self.assertRaises(exception.Unauthorized,
+                          api.authenticate,
+                          context,
+                          auth_info,
+                          auth_context)
+
+
+class TestAuthExternalLegacyDefaultDomain(test_v3.RestfulTestCase):
+    content_type = 'json'
+
+    def config_overrides(self):
+        super(TestAuthExternalLegacyDefaultDomain, self).config_overrides()
+        self.auth_plugin_config_override(
+            methods=['external', 'password', 'token'],
+            external='keystone.auth.plugins.external.LegacyDefaultDomain',
+            password='keystone.auth.plugins.password.Password',
+            token='keystone.auth.plugins.token.Token')
+
+    def test_remote_user_no_realm(self):
+        self.config_fixture.config(group='auth', methods='external')
+        api = auth.controllers.Auth()
+        context, auth_info, auth_context = self.build_external_auth_request(
+            self.default_domain_user['name'])
+        api.authenticate(context, auth_info, auth_context)
+        self.assertEqual(auth_context['user_id'],
+                         self.default_domain_user['id'])
+
+    def test_remote_user_no_domain(self):
+        api = auth.controllers.Auth()
+        context, auth_info, auth_context = self.build_external_auth_request(
+            self.user['name'])
+        self.assertRaises(exception.Unauthorized,
+                          api.authenticate,
+                          context,
+                          auth_info,
+                          auth_context)
+
+
+class TestAuthExternalLegacyDomain(test_v3.RestfulTestCase):
+    content_type = 'json'
+
+    def config_overrides(self):
+        super(TestAuthExternalLegacyDomain, self).config_overrides()
+        self.auth_plugin_config_override(
+            methods=['external', 'password', 'token'],
+            external='keystone.auth.plugins.external.LegacyDomain',
+            password='keystone.auth.plugins.password.Password',
+            token='keystone.auth.plugins.token.Token')
+
+    def test_remote_user_with_realm(self):
+        api = auth.controllers.Auth()
+        remote_user = '%s@%s' % (self.user['name'], self.domain['name'])
+        context, auth_info, auth_context = self.build_external_auth_request(
+            remote_user)
+
+        api.authenticate(context, auth_info, auth_context)
+        self.assertEqual(auth_context['user_id'], self.user['id'])
+
+        # Now test to make sure the user name can, itself, contain the
+        # '@' character.
+        user = {'name': 'myname@mydivision'}
+        self.identity_api.update_user(self.user['id'], user)
+        remote_user = '%s@%s' % (user['name'], self.domain['name'])
+        context, auth_info, auth_context = self.build_external_auth_request(
+            remote_user)
+
+        api.authenticate(context, auth_info, auth_context)
+        self.assertEqual(auth_context['user_id'], self.user['id'])
+
+    def test_project_id_scoped_with_remote_user(self):
+        self.config_fixture.config(group='token', bind=['kerberos'])
+        auth_data = self.build_authentication_request(
+            project_id=self.project['id'])
+        remote_user = '%s@%s' % (self.user['name'], self.domain['name'])
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
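+        # extra_environ simulates the WSGI variables that a fronting web
+        # server (e.g. Apache performing Kerberos auth) would set for
+        # external authentication.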
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidProjectScopedTokenResponse(r)
+        self.assertEqual(token['bind']['kerberos'], self.user['name'])
+
+    def test_unscoped_bind_with_remote_user(self):
+        self.config_fixture.config(group='token', bind=['kerberos'])
+        auth_data = self.build_authentication_request()
+        remote_user = '%s@%s' % (self.user['name'], self.domain['name'])
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidUnscopedTokenResponse(r)
+        self.assertEqual(token['bind']['kerberos'], self.user['name'])
+
+
+class TestAuthExternalDomain(test_v3.RestfulTestCase):
+    content_type = 'json'
+
+    def config_overrides(self):
+        super(TestAuthExternalDomain, self).config_overrides()
+        self.kerberos = False
+        self.auth_plugin_config_override(
+            methods=['external', 'password', 'token'],
+            external='keystone.auth.plugins.external.Domain',
+            password='keystone.auth.plugins.password.Password',
+            token='keystone.auth.plugins.token.Token')
+
+    def test_remote_user_with_realm(self):
+        api = auth.controllers.Auth()
+        remote_user = self.user['name']
+        remote_domain = self.domain['name']
+        context, auth_info, auth_context = self.build_external_auth_request(
+            remote_user, remote_domain=remote_domain, kerberos=self.kerberos)
+
+        api.authenticate(context, auth_info, auth_context)
+        self.assertEqual(auth_context['user_id'], self.user['id'])
+
+        # Now test to make sure the user name can, itself, contain the
+        # '@' character.
+        user = {'name': 'myname@mydivision'}
+        self.identity_api.update_user(self.user['id'], user)
+        remote_user = user['name']
+        context, auth_info, auth_context = self.build_external_auth_request(
+            remote_user, remote_domain=remote_domain, kerberos=self.kerberos)
+
+        api.authenticate(context, auth_info, auth_context)
+        self.assertEqual(auth_context['user_id'], self.user['id'])
+
+    def test_project_id_scoped_with_remote_user(self):
+        self.config_fixture.config(group='token', bind=['kerberos'])
+        auth_data = self.build_authentication_request(
+            project_id=self.project['id'],
+            kerberos=self.kerberos)
+        remote_user = self.user['name']
+        remote_domain = self.domain['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'REMOTE_DOMAIN': remote_domain,
+                                             'AUTH_TYPE': 'Negotiate'})
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidProjectScopedTokenResponse(r)
+        self.assertEqual(token['bind']['kerberos'], self.user['name'])
+
+    def test_unscoped_bind_with_remote_user(self):
+        self.config_fixture.config(group='token', bind=['kerberos'])
+        auth_data = self.build_authentication_request(kerberos=self.kerberos)
+        remote_user = self.user['name']
+        remote_domain = self.domain['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'REMOTE_DOMAIN': remote_domain,
+                                             'AUTH_TYPE': 'Negotiate'})
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidUnscopedTokenResponse(r)
+        self.assertEqual(token['bind']['kerberos'], self.user['name'])
+
+
+class TestAuthKerberos(TestAuthExternalDomain):
+
+    def config_overrides(self):
+        super(TestAuthKerberos, self).config_overrides()
+        self.kerberos = True
+        self.auth_plugin_config_override(
+            methods=['kerberos', 'password', 'token'],
+            kerberos='keystone.auth.plugins.external.KerberosDomain',
+            password='keystone.auth.plugins.password.Password',
+            token='keystone.auth.plugins.token.Token')
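+        # The KerberosDomain plugin additionally requires
+        # AUTH_TYPE=Negotiate in the request environment; the inherited
+        # tests arrange this via kerberos=True and the extra_environ updates.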
+
+
+class TestAuth(test_v3.RestfulTestCase):
+
+    def test_unscoped_token_with_user_id(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def test_unscoped_token_with_user_domain_id(self):
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_id=self.domain['id'],
+            password=self.user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def test_unscoped_token_with_user_domain_name(self):
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_name=self.domain['name'],
+            password=self.user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def test_project_id_scoped_token_with_user_id(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectScopedTokenResponse(r)
+
+    def _second_project_as_default(self):
+        ref = self.new_project_ref(domain_id=self.domain_id)
+        r = self.post('/projects', body={'project': ref})
+        project = self.assertValidProjectResponse(r, ref)
+
+        # grant the user a role on the project
+        self.put(
+            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
+                'user_id': self.user['id'],
+                'project_id': project['id'],
+                'role_id': self.role['id']})
+
+        # set the user's preferred project
+        body = {'user': {'default_project_id': project['id']}}
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': self.user['id']},
+            body=body)
+        self.assertValidUserResponse(r)
+
+        return project
+
+    def test_default_project_id_scoped_token_with_user_id(self):
+        project = self._second_project_as_default()
+
+        # attempt to authenticate without requesting a project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectScopedTokenResponse(r)
+        self.assertEqual(r.result['token']['project']['id'], project['id'])
+
+    def test_default_project_id_scoped_token_with_user_id_no_catalog(self):
+        project = self._second_project_as_default()
+
+        # attempt to authenticate without requesting a project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.post('/auth/tokens?nocatalog', body=auth_data, noauth=True)
+        self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
+        self.assertEqual(r.result['token']['project']['id'], project['id'])
+
+    def test_explicit_unscoped_token(self):
+        self._second_project_as_default()
+
+        # request an explicitly unscoped token, ignoring the default project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            unscoped="unscoped")
+        r = self.post('/auth/tokens', body=auth_data, noauth=True)
+
+        self.assertIsNone(r.result['token'].get('project'))
+        self.assertIsNone(r.result['token'].get('domain'))
+        self.assertIsNone(r.result['token'].get('scope'))
+
+    def test_implicit_project_id_scoped_token_with_user_id_no_catalog(self):
+        # authenticate with an explicit project scope, requesting no catalog
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.post('/auth/tokens?nocatalog', body=auth_data, noauth=True)
+        self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
+        self.assertEqual(r.result['token']['project']['id'],
+                         self.project['id'])
+
+    def test_auth_catalog_attributes(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.v3_authenticate_token(auth_data)
+
+        catalog = r.result['token']['catalog']
+        self.assertEqual(1, len(catalog))
+        catalog = catalog[0]
+
+        self.assertEqual(self.service['id'], catalog['id'])
+        self.assertEqual(self.service['name'], catalog['name'])
+        self.assertEqual(self.service['type'], catalog['type'])
+
+        endpoint = catalog['endpoints']
+        self.assertEqual(1, len(endpoint))
+        endpoint = endpoint[0]
+
+        self.assertEqual(self.endpoint['id'], endpoint['id'])
+        self.assertEqual(self.endpoint['interface'], endpoint['interface'])
+        self.assertEqual(self.endpoint['region_id'], endpoint['region_id'])
+        self.assertEqual(self.endpoint['url'], endpoint['url'])
+
+    def _check_disabled_endpoint_result(self, catalog, disabled_endpoint_id):
+        endpoints = catalog[0]['endpoints']
+        endpoint_ids = [ep['id'] for ep in endpoints]
+        self.assertEqual([self.endpoint_id], endpoint_ids)
+
+    def test_auth_catalog_disabled_service(self):
+        """On authenticate, get a catalog that excludes disabled services."""
+        # although the child endpoint is enabled, the service is disabled
+        self.assertTrue(self.endpoint['enabled'])
+        self.catalog_api.update_service(
+            self.endpoint['service_id'], {'enabled': False})
+        service = self.catalog_api.get_service(self.endpoint['service_id'])
+        self.assertFalse(service['enabled'])
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.v3_authenticate_token(auth_data)
+
+        self.assertEqual([], r.result['token']['catalog'])
+
+    def test_auth_catalog_disabled_endpoint(self):
+        """On authenticate, get a catalog that excludes disabled endpoints."""
+
+        # Create a disabled endpoint that's like the enabled one.
+        disabled_endpoint_ref = copy.copy(self.endpoint)
+        disabled_endpoint_id = uuid.uuid4().hex
+        disabled_endpoint_ref.update({
+            'id': disabled_endpoint_id,
+            'enabled': False,
+            'interface': 'internal'
+        })
+        self.catalog_api.create_endpoint(disabled_endpoint_id,
+                                         disabled_endpoint_ref)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.v3_authenticate_token(auth_data)
+
+        self._check_disabled_endpoint_result(r.result['token']['catalog'],
+                                             disabled_endpoint_id)
+
+    def test_project_id_scoped_token_with_user_id_401(self):
+        project = self.new_project_ref(domain_id=self.domain_id)
+        self.resource_api.create_project(project['id'], project)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=project['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_user_and_group_roles_scoped_token(self):
+        """Test correct roles are returned in scoped token.
+
+        Test Plan:
+
+        - Create a domain, with 1 project, 2 users (user1 and user2)
+          and 2 groups (group1 and group2)
+        - Make user1 a member of group1, user2 a member of group2
+        - Create 8 roles, assigning them to each of the 8 combinations
+          of users/groups on domain/project
+        - Get a project scoped token for user1, checking that the right
+          two roles are returned (one directly assigned, one by virtue
+          of group membership)
+        - Repeat this for a domain scoped token
+        - Make user1 also a member of group2
+        - Get another scoped token making sure the additional role
+          shows up
+        - User2 is just here as a spoiler, to make sure we don't get
+          any roles uniquely assigned to it returned in any of our
+          tokens
+
+        """
+
+        domainA = self.new_domain_ref()
+        self.resource_api.create_domain(domainA['id'], domainA)
+        projectA = self.new_project_ref(domain_id=domainA['id'])
+        self.resource_api.create_project(projectA['id'], projectA)
+
+        user1 = self.new_user_ref(
+            domain_id=domainA['id'])
+        password = user1['password']
+        user1 = self.identity_api.create_user(user1)
+        user1['password'] = password
+
+        user2 = self.new_user_ref(
+            domain_id=domainA['id'])
+        password = user2['password']
+        user2 = self.identity_api.create_user(user2)
+        user2['password'] = password
+
+        group1 = self.new_group_ref(
+            domain_id=domainA['id'])
+        group1 = self.identity_api.create_group(group1)
+
+        group2 = self.new_group_ref(
+            domain_id=domainA['id'])
+        group2 = self.identity_api.create_group(group2)
+
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group1['id'])
+        self.identity_api.add_user_to_group(user2['id'],
+                                            group2['id'])
+
+        # Now create all the roles and assign them
+        role_list = []
+        for _ in range(8):
+            role = self.new_role_ref()
+            self.role_api.create_role(role['id'], role)
+            role_list.append(role)
+
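+        # Assignment layout used below (role_list index -> actor on target):
+        #   0: user1 on domain    1: user1 on project
+        #   2: user2 on domain    3: user2 on project
+        #   4: group1 on domain   5: group1 on project
+        #   6: group2 on domain   7: group2 on project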
+        self.assignment_api.create_grant(role_list[0]['id'],
+                                         user_id=user1['id'],
+                                         domain_id=domainA['id'])
+        self.assignment_api.create_grant(role_list[1]['id'],
+                                         user_id=user1['id'],
+                                         project_id=projectA['id'])
+        self.assignment_api.create_grant(role_list[2]['id'],
+                                         user_id=user2['id'],
+                                         domain_id=domainA['id'])
+        self.assignment_api.create_grant(role_list[3]['id'],
+                                         user_id=user2['id'],
+                                         project_id=projectA['id'])
+        self.assignment_api.create_grant(role_list[4]['id'],
+                                         group_id=group1['id'],
+                                         domain_id=domainA['id'])
+        self.assignment_api.create_grant(role_list[5]['id'],
+                                         group_id=group1['id'],
+                                         project_id=projectA['id'])
+        self.assignment_api.create_grant(role_list[6]['id'],
+                                         group_id=group2['id'],
+                                         domain_id=domainA['id'])
+        self.assignment_api.create_grant(role_list[7]['id'],
+                                         group_id=group2['id'],
+                                         project_id=projectA['id'])
+
+        # First, get a project scoped token - which should
+        # contain the direct user role and the one by virtue
+        # of group membership
+        auth_data = self.build_authentication_request(
+            user_id=user1['id'],
+            password=user1['password'],
+            project_id=projectA['id'])
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidScopedTokenResponse(r)
+        roles_ids = [ref['id'] for ref in token['roles']]
+        self.assertEqual(2, len(token['roles']))
+        self.assertIn(role_list[1]['id'], roles_ids)
+        self.assertIn(role_list[5]['id'], roles_ids)
+
+        # Now the same thing for a domain scoped token
+        auth_data = self.build_authentication_request(
+            user_id=user1['id'],
+            password=user1['password'],
+            domain_id=domainA['id'])
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidScopedTokenResponse(r)
+        roles_ids = [ref['id'] for ref in token['roles']]
+        self.assertEqual(2, len(token['roles']))
+        self.assertIn(role_list[0]['id'], roles_ids)
+        self.assertIn(role_list[4]['id'], roles_ids)
+
+        # Finally, add user1 to the 2nd group, and get a new
+        # scoped token - the extra role should now be included
+        # by virtue of the 2nd group
+        self.identity_api.add_user_to_group(user1['id'],
+                                            group2['id'])
+        auth_data = self.build_authentication_request(
+            user_id=user1['id'],
+            password=user1['password'],
+            project_id=projectA['id'])
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidScopedTokenResponse(r)
+        roles_ids = [ref['id'] for ref in token['roles']]
+        self.assertEqual(3, len(token['roles']))
+        self.assertIn(role_list[1]['id'], roles_ids)
+        self.assertIn(role_list[5]['id'], roles_ids)
+        self.assertIn(role_list[7]['id'], roles_ids)
+
+    def test_auth_token_cross_domain_group_and_project(self):
+        """Verify getting a token in cross domain group/project roles."""
+        # create domain, project and group and grant roles to user
+        domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(domain1['id'], domain1)
+        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+                    'domain_id': domain1['id']}
+        self.resource_api.create_project(project1['id'], project1)
+        user_foo = self.new_user_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID)
+        password = user_foo['password']
+        user_foo = self.identity_api.create_user(user_foo)
+        user_foo['password'] = password
+        role_member = {'id': uuid.uuid4().hex,
+                       'name': uuid.uuid4().hex}
+        self.role_api.create_role(role_member['id'], role_member)
+        role_admin = {'id': uuid.uuid4().hex,
+                      'name': uuid.uuid4().hex}
+        self.role_api.create_role(role_admin['id'], role_admin)
+        role_foo_domain1 = {'id': uuid.uuid4().hex,
+                            'name': uuid.uuid4().hex}
+        self.role_api.create_role(role_foo_domain1['id'], role_foo_domain1)
+        role_group_domain1 = {'id': uuid.uuid4().hex,
+                              'name': uuid.uuid4().hex}
+        self.role_api.create_role(role_group_domain1['id'], role_group_domain1)
+        self.assignment_api.add_user_to_project(project1['id'],
+                                                user_foo['id'])
+        new_group = {'domain_id': domain1['id'], 'name': uuid.uuid4().hex}
+        new_group = self.identity_api.create_group(new_group)
+        self.identity_api.add_user_to_group(user_foo['id'],
+                                            new_group['id'])
+        self.assignment_api.create_grant(
+            user_id=user_foo['id'],
+            project_id=project1['id'],
+            role_id=role_member['id'])
+        self.assignment_api.create_grant(
+            group_id=new_group['id'],
+            project_id=project1['id'],
+            role_id=role_admin['id'])
+        self.assignment_api.create_grant(
+            user_id=user_foo['id'],
+            domain_id=domain1['id'],
+            role_id=role_foo_domain1['id'])
+        self.assignment_api.create_grant(
+            group_id=new_group['id'],
+            domain_id=domain1['id'],
+            role_id=role_group_domain1['id'])
+
+        # Get a scoped token for the project
+        auth_data = self.build_authentication_request(
+            username=user_foo['name'],
+            user_domain_id=test_v3.DEFAULT_DOMAIN_ID,
+            password=user_foo['password'],
+            project_name=project1['name'],
+            project_domain_id=domain1['id'])
+
+        r = self.v3_authenticate_token(auth_data)
+        scoped_token = self.assertValidScopedTokenResponse(r)
+        project = scoped_token["project"]
+        roles_ids = []
+        for ref in scoped_token['roles']:
+            roles_ids.append(ref['id'])
+        self.assertEqual(project1['id'], project["id"])
+        self.assertIn(role_member['id'], roles_ids)
+        self.assertIn(role_admin['id'], roles_ids)
+        self.assertNotIn(role_foo_domain1['id'], roles_ids)
+        self.assertNotIn(role_group_domain1['id'], roles_ids)
+
+    def test_project_id_scoped_token_with_user_domain_id(self):
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_id=self.domain['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectScopedTokenResponse(r)
+
+    def test_project_id_scoped_token_with_user_domain_name(self):
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_name=self.domain['name'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectScopedTokenResponse(r)
+
+    def test_domain_id_scoped_token_with_user_id(self):
+        path = '/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id'])
+        self.put(path=path)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            domain_id=self.domain['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidDomainScopedTokenResponse(r)
+
+    def test_domain_id_scoped_token_with_user_domain_id(self):
+        path = '/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id'])
+        self.put(path=path)
+
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_id=self.domain['id'],
+            password=self.user['password'],
+            domain_id=self.domain['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidDomainScopedTokenResponse(r)
+
+    def test_domain_id_scoped_token_with_user_domain_name(self):
+        path = '/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id'])
+        self.put(path=path)
+
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_name=self.domain['name'],
+            password=self.user['password'],
+            domain_id=self.domain['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidDomainScopedTokenResponse(r)
+
+    def test_domain_name_scoped_token_with_user_id(self):
+        path = '/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id'])
+        self.put(path=path)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            domain_name=self.domain['name'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidDomainScopedTokenResponse(r)
+
+    def test_domain_name_scoped_token_with_user_domain_id(self):
+        path = '/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id'])
+        self.put(path=path)
+
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_id=self.domain['id'],
+            password=self.user['password'],
+            domain_name=self.domain['name'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidDomainScopedTokenResponse(r)
+
+    def test_domain_name_scoped_token_with_user_domain_name(self):
+        path = '/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id'])
+        self.put(path=path)
+
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_name=self.domain['name'],
+            password=self.user['password'],
+            domain_name=self.domain['name'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidDomainScopedTokenResponse(r)
+
+    def test_domain_scope_token_with_group_role(self):
+        group = self.new_group_ref(
+            domain_id=self.domain_id)
+        group = self.identity_api.create_group(group)
+
+        # add user to group
+        self.identity_api.add_user_to_group(self.user['id'], group['id'])
+
+        # grant the domain role to group
+        path = '/domains/%s/groups/%s/roles/%s' % (
+            self.domain['id'], group['id'], self.role['id'])
+        self.put(path=path)
+
+        # now get a domain-scoped token
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            domain_id=self.domain['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidDomainScopedTokenResponse(r)
+
+    def test_domain_scope_token_with_name(self):
+        # grant the domain role to user
+        path = '/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id'])
+        self.put(path=path)
+        # now get a domain-scoped token
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            domain_name=self.domain['name'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidDomainScopedTokenResponse(r)
+
+    def test_domain_scope_failed(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            domain_id=self.domain['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_auth_with_id(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+        token = r.headers.get('X-Subject-Token')
+
+        # test token auth
+        auth_data = self.build_authentication_request(token=token)
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def get_v2_token(self, tenant_id=None):
+        body = {
+            'auth': {
+                'passwordCredentials': {
+                    'username': self.default_domain_user['name'],
+                    'password': self.default_domain_user['password'],
+                },
+            },
+        }
+        if tenant_id:
+            # Scope the v2 request to the given tenant.
+            body['auth']['tenantId'] = tenant_id
+        r = self.admin_request(method='POST', path='/v2.0/tokens', body=body)
+        return r
+
+    def test_validate_v2_unscoped_token_with_v3_api(self):
+        v2_token = self.get_v2_token().result['access']['token']['id']
+        auth_data = self.build_authentication_request(token=v2_token)
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def test_validate_v2_scoped_token_with_v3_api(self):
+        v2_response = self.get_v2_token(
+            tenant_id=self.default_domain_project['id'])
+        result = v2_response.result
+        v2_token = result['access']['token']['id']
+        auth_data = self.build_authentication_request(
+            token=v2_token,
+            project_id=self.default_domain_project['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidScopedTokenResponse(r)
+
+    def test_invalid_user_id(self):
+        auth_data = self.build_authentication_request(
+            user_id=uuid.uuid4().hex,
+            password=self.user['password'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_invalid_user_name(self):
+        auth_data = self.build_authentication_request(
+            username=uuid.uuid4().hex,
+            user_domain_id=self.domain['id'],
+            password=self.user['password'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_invalid_domain_id(self):
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_id=uuid.uuid4().hex,
+            password=self.user['password'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_invalid_domain_name(self):
+        auth_data = self.build_authentication_request(
+            username=self.user['name'],
+            user_domain_name=uuid.uuid4().hex,
+            password=self.user['password'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_invalid_password(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=uuid.uuid4().hex)
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_remote_user_no_realm(self):
+        self.config_fixture.config(group='auth', methods='external')
+        api = auth.controllers.Auth()
+        context, auth_info, auth_context = self.build_external_auth_request(
+            self.default_domain_user['name'])
+        api.authenticate(context, auth_info, auth_context)
+        self.assertEqual(auth_context['user_id'],
+                         self.default_domain_user['id'])
+        # Now test to make sure the user name can, itself, contain the
+        # '@' character.
+        user = {'name': 'myname@mydivision'}
+        self.identity_api.update_user(self.default_domain_user['id'], user)
+        context, auth_info, auth_context = self.build_external_auth_request(
+            user["name"])
+        api.authenticate(context, auth_info, auth_context)
+        self.assertEqual(auth_context['user_id'],
+                         self.default_domain_user['id'])
+
+    def test_remote_user_no_domain(self):
+        api = auth.controllers.Auth()
+        context, auth_info, auth_context = self.build_external_auth_request(
+            self.user['name'])
+        self.assertRaises(exception.Unauthorized,
+                          api.authenticate,
+                          context,
+                          auth_info,
+                          auth_context)
+
+    def test_remote_user_and_password(self):
+        # both REMOTE_USER and password methods must pass.
+        # note that they do not have to match
+        api = auth.controllers.Auth()
+        auth_data = self.build_authentication_request(
+            user_domain_id=self.default_domain_user['domain_id'],
+            username=self.default_domain_user['name'],
+            password=self.default_domain_user['password'])['auth']
+        context, auth_info, auth_context = self.build_external_auth_request(
+            self.default_domain_user['name'], auth_data=auth_data)
+
+        api.authenticate(context, auth_info, auth_context)
+
+    def test_remote_user_and_explicit_external(self):
+        # both REMOTE_USER and password methods must pass.
+        # note that they do not have to match
+        auth_data = self.build_authentication_request(
+            user_domain_id=self.domain['id'],
+            username=self.user['name'],
+            password=self.user['password'])['auth']
+        auth_data['identity']['methods'] = ["password", "external"]
+        auth_data['identity']['external'] = {}
+        api = auth.controllers.Auth()
+        auth_info = auth.controllers.AuthInfo(None, auth_data)
+        auth_context = {'extras': {}, 'method_names': []}
+        self.assertRaises(exception.Unauthorized,
+                          api.authenticate,
+                          self.empty_context,
+                          auth_info,
+                          auth_context)
+
+    def test_remote_user_bad_password(self):
+        # both REMOTE_USER and password methods must pass.
+        api = auth.controllers.Auth()
+        auth_data = self.build_authentication_request(
+            user_domain_id=self.domain['id'],
+            username=self.user['name'],
+            password='badpassword')['auth']
+        context, auth_info, auth_context = self.build_external_auth_request(
+            self.default_domain_user['name'], auth_data=auth_data)
+        self.assertRaises(exception.Unauthorized,
+                          api.authenticate,
+                          context,
+                          auth_info,
+                          auth_context)
+
+    def test_bind_not_set_with_remote_user(self):
+        self.config_fixture.config(group='token', bind=[])
+        auth_data = self.build_authentication_request()
+        remote_user = self.default_domain_user['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidUnscopedTokenResponse(r)
+        self.assertNotIn('bind', token)
+
+    # TODO(ayoung): move to TestPKITokenAPIs; it will be run for both formats
+    def test_verify_with_bound_token(self):
+        self.config_fixture.config(group='token', bind='kerberos')
+        auth_data = self.build_authentication_request(
+            project_id=self.project['id'])
+        remote_user = self.default_domain_user['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
+
+        token = self.get_requested_token(auth_data)
+        headers = {'X-Subject-Token': token}
+        r = self.get('/auth/tokens', headers=headers, token=token)
+        token = self.assertValidProjectScopedTokenResponse(r)
+        self.assertEqual(token['bind']['kerberos'],
+                         self.default_domain_user['name'])
+
+    def test_auth_with_bind_token(self):
+        self.config_fixture.config(group='token', bind=['kerberos'])
+
+        auth_data = self.build_authentication_request()
+        remote_user = self.default_domain_user['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
+        r = self.v3_authenticate_token(auth_data)
+
+        # the unscoped token should have bind information in it
+        token = self.assertValidUnscopedTokenResponse(r)
+        self.assertEqual(token['bind']['kerberos'], remote_user)
+
+        token = r.headers.get('X-Subject-Token')
+
+        # using unscoped token with remote user succeeds
+        auth_params = {'token': token, 'project_id': self.project_id}
+        auth_data = self.build_authentication_request(**auth_params)
+        r = self.v3_authenticate_token(auth_data)
+        token = self.assertValidProjectScopedTokenResponse(r)
+
+        # the bind information should be carried over from the original token
+        self.assertEqual(token['bind']['kerberos'], remote_user)
+
+    def test_v2_v3_bind_token_intermix(self):
+        self.config_fixture.config(group='token', bind='kerberos')
+
+        # we need our own user registered to the default domain because of
+        # the way external auth works.
+        remote_user = self.default_domain_user['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
+        body = {'auth': {}}
+        resp = self.admin_request(path='/v2.0/tokens',
+                                  method='POST',
+                                  body=body)
+
+        v2_token_data = resp.result
+
+        bind = v2_token_data['access']['token']['bind']
+        self.assertEqual(bind['kerberos'], self.default_domain_user['name'])
+
+        v2_token_id = v2_token_data['access']['token']['id']
+        # NOTE(gyee): self.get() will try to obtain an auth token if one
+        # is not provided. When REMOTE_USER is present in the request
+        # environment, the external user auth plugin is used in conjunction
+        # with the password auth for the admin user. Therefore, we need to
+        # cleanup the REMOTE_USER information from the previous call.
+        del self.admin_app.extra_environ['REMOTE_USER']
+        headers = {'X-Subject-Token': v2_token_id}
+        resp = self.get('/auth/tokens', headers=headers)
+        token_data = resp.result
+
+        self.assertDictEqual(v2_token_data['access']['token']['bind'],
+                             token_data['token']['bind'])
+
+    def test_authenticating_a_user_with_no_password(self):
+        user = self.new_user_ref(domain_id=self.domain['id'])
+        user.pop('password', None)  # can't have a password for this test
+        user = self.identity_api.create_user(user)
+
+        auth_data = self.build_authentication_request(
+            user_id=user['id'],
+            password='password')
+
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_disabled_default_project_result_in_unscoped_token(self):
+        # create a disabled project to work with
+        project = self.create_new_default_project_for_user(
+            self.user['id'], self.domain_id, enable_project=False)
+
+        # assign a role to user for the new project
+        self.assignment_api.add_role_to_user_and_project(self.user['id'],
+                                                         project['id'],
+                                                         self.role_id)
+
+        # attempt to authenticate without requesting a project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def test_disabled_default_project_domain_result_in_unscoped_token(self):
+        domain_ref = self.new_domain_ref()
+        r = self.post('/domains', body={'domain': domain_ref})
+        domain = self.assertValidDomainResponse(r, domain_ref)
+
+        project = self.create_new_default_project_for_user(
+            self.user['id'], domain['id'])
+
+        # assign a role to user for the new project
+        self.assignment_api.add_role_to_user_and_project(self.user['id'],
+                                                         project['id'],
+                                                         self.role_id)
+
+        # now disable the project domain
+        body = {'domain': {'enabled': False}}
+        r = self.patch('/domains/%(domain_id)s' % {'domain_id': domain['id']},
+                       body=body)
+        self.assertValidDomainResponse(r)
+
+        # attempt to authenticate without requesting a project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def test_no_access_to_default_project_result_in_unscoped_token(self):
+        # create a default project for the user; note that no role is
+        # assigned to the user on it
+        self.create_new_default_project_for_user(self.user['id'],
+                                                 self.domain_id)
+
+        # attempt to authenticate without requesting a project
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidUnscopedTokenResponse(r)
+
+    def test_disabled_scope_project_domain_result_in_401(self):
+        # create a disabled domain
+        domain = self.new_domain_ref()
+        domain['enabled'] = False
+        self.resource_api.create_domain(domain['id'], domain)
+
+        # create a project in the disabled domain
+        project = self.new_project_ref(domain_id=domain['id'])
+        self.resource_api.create_project(project['id'], project)
+
+        # assign some role to self.user for the project in the disabled domain
+        self.assignment_api.add_role_to_user_and_project(
+            self.user['id'],
+            project['id'],
+            self.role_id)
+
+        # user should not be able to auth with project_id
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=project['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+        # user should not be able to auth with project_name & domain
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_name=project['name'],
+            project_domain_id=domain['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_auth_methods_with_different_identities_fails(self):
+        # get the token for a user. This is self.user which is different from
+        # self.default_domain_user.
+        token = self.get_scoped_token()
+        # try both password and token methods with different identities and it
+        # should fail
+        auth_data = self.build_authentication_request(
+            token=token,
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+
+class TestAuthJSONExternal(test_v3.RestfulTestCase):
+    content_type = 'json'
+
+    def config_overrides(self):
+        super(TestAuthJSONExternal, self).config_overrides()
+        self.config_fixture.config(group='auth', methods='')
+
+    def auth_plugin_config_override(self, methods=None, **method_classes):
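+        # Deliberately ignore the arguments: this class runs with an empty
+        # auth methods list, so no auth method is enabled.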
+        self.config_fixture.config(group='auth', methods='')
+
+    def test_remote_user_no_method(self):
+        api = auth.controllers.Auth()
+        context, auth_info, auth_context = self.build_external_auth_request(
+            self.default_domain_user['name'])
+        self.assertRaises(exception.Unauthorized,
+                          api.authenticate,
+                          context,
+                          auth_info,
+                          auth_context)
+
+
+class TestTrustOptional(test_v3.RestfulTestCase):
+    def config_overrides(self):
+        super(TestTrustOptional, self).config_overrides()
+        self.config_fixture.config(group='trust', enabled=False)
+
+    def test_trusts_404(self):
+        self.get('/OS-TRUST/trusts', body={'trust': {}}, expected_status=404)
+        self.post('/OS-TRUST/trusts', body={'trust': {}}, expected_status=404)
+
+    def test_auth_with_scope_in_trust_403(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            trust_id=uuid.uuid4().hex)
+        self.v3_authenticate_token(auth_data, expected_status=403)
+
+
+class TestTrustRedelegation(test_v3.RestfulTestCase):
+    """Redelegation valid and secure
+
+    Redelegation is a hierarchical structure of trusts between initial trustor
+    and a group of users allowed to impersonate trustor and act in his name.
+    Hierarchy is created in a process of trusting already trusted permissions
+    and organized as an adjacency list using 'redelegated_trust_id' field.
+    Redelegation is valid if each subsequent trust in a chain passes 'not more'
+    permissions than being redelegated.
+
+    Trust constraints are:
+     * roles - set of roles trusted by trustor
+     * expiration_time
+     * allow_redelegation - a flag
+     * redelegation_count - decreasing value restricting length of trust chain
+     * remaining_uses - DISALLOWED when allow_redelegation == True
+
+    Trust becomes invalid in case:
+     * trust roles were revoked from trustor
+     * one of the users in the delegation chain was disabled or deleted
+     * expiration time passed
+     * one of the parent trusts has become invalid
+     * one of the parent trusts was deleted
+
+    """
+
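+    # Illustrative sketch of the mechanics exercised below: with
+    # max_redelegation_count = 10, each chained trust must carry a
+    # redelegation_count one less than its parent's (e.g. 10 -> 9 -> 8),
+    # and creating a chained trust with allow_redelegation=False
+    # terminates the chain by forcing its redelegation_count to 0.
+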
+    def config_overrides(self):
+        super(TestTrustRedelegation, self).config_overrides()
+        self.config_fixture.config(
+            group='trust',
+            enabled=True,
+            allow_redelegation=True,
+            max_redelegation_count=10
+        )
+
+    def setUp(self):
+        super(TestTrustRedelegation, self).setUp()
+        # Create a trustee user to delegate roles to
+        trustee_user_ref = self.new_user_ref(domain_id=self.domain_id)
+        self.trustee_user = self.identity_api.create_user(trustee_user_ref)
+        self.trustee_user['password'] = trustee_user_ref['password']
+
+        # trustor->trustee
+        self.redelegated_trust_ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user['id'],
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id],
+            allow_redelegation=True)
+
+        # trustor->trustee (chained trust; no explicit expiration time)
+        self.chained_trust_ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user['id'],
+            project_id=self.project_id,
+            impersonation=True,
+            role_ids=[self.role_id],
+            allow_redelegation=True)
+
+    def _get_trust_token(self, trust):
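+        """Get a trust-scoped token for the trustee of ``trust``."""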
+        trust_id = trust['id']
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust_id)
+        trust_token = self.get_requested_token(auth_data)
+        return trust_token
+
+    def test_depleted_redelegation_count_error(self):
+        self.redelegated_trust_ref['redelegation_count'] = 0
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': self.redelegated_trust_ref})
+        trust = self.assertValidTrustResponse(r)
+        trust_token = self._get_trust_token(trust)
+
+        # Attempt to create a redelegated trust.
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': self.chained_trust_ref},
+                  token=trust_token,
+                  expected_status=403)
+
+    def test_modified_redelegation_count_error(self):
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': self.redelegated_trust_ref})
+        trust = self.assertValidTrustResponse(r)
+        trust_token = self._get_trust_token(trust)
+
+        # Attempt to create a redelegated trust declaring a
+        # redelegation_count lower than the expected (parent - 1) value.
+        correct = trust['redelegation_count'] - 1
+        incorrect = correct - 1
+        self.chained_trust_ref['redelegation_count'] = incorrect
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': self.chained_trust_ref},
+                  token=trust_token,
+                  expected_status=403)
+
+    def test_max_redelegation_count_constraint(self):
+        incorrect = CONF.trust.max_redelegation_count + 1
+        self.redelegated_trust_ref['redelegation_count'] = incorrect
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': self.redelegated_trust_ref},
+                  expected_status=403)
+
+    def test_redelegation_expiry(self):
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': self.redelegated_trust_ref})
+        trust = self.assertValidTrustResponse(r)
+        trust_token = self._get_trust_token(trust)
+
+        # Attempt to create a redelegated trust that would outlive the
+        # parent trust: give it 10 minutes versus the parent's 1 minute.
+        too_long_live_chained_trust_ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user['id'],
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=10),
+            role_ids=[self.role_id])
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': too_long_live_chained_trust_ref},
+                  token=trust_token,
+                  expected_status=403)
+
+    def test_redelegation_remaining_uses(self):
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': self.redelegated_trust_ref})
+        trust = self.assertValidTrustResponse(r)
+        trust_token = self._get_trust_token(trust)
+
+        # Attempt to create a redelegated trust with remaining_uses defined.
+        # It must fail per the specification: remaining_uses must be omitted
+        # when redelegating a trust; the specific value does not matter.
+        self.chained_trust_ref['remaining_uses'] = 5
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': self.chained_trust_ref},
+                  token=trust_token,
+                  expected_status=403)
+
+    def test_roles_subset(self):
+        # Build second role
+        role = self.new_role_ref()
+        self.assignment_api.create_role(role['id'], role)
+        # assign a new role to the user
+        self.assignment_api.create_grant(role_id=role['id'],
+                                         user_id=self.user_id,
+                                         project_id=self.project_id)
+
+        # Create first trust with extended set of roles
+        ref = self.redelegated_trust_ref
+        ref['roles'].append({'id': role['id']})
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+        # Trust created with exact set of roles (checked by role id)
+        role_id_set = set(x['id'] for x in ref['roles'])
+        trust_role_id_set = set(x['id'] for x in trust['roles'])
+        self.assertEqual(role_id_set, trust_role_id_set)
+
+        trust_token = self._get_trust_token(trust)
+
+        # Chain second trust with roles subset
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': self.chained_trust_ref},
+                      token=trust_token)
+        trust2 = self.assertValidTrustResponse(r)
+        # First trust contains roles superset
+        # Second trust contains roles subset
+        role_id_set1 = set(x['id'] for x in trust['roles'])
+        role_id_set2 = set(x['id'] for x in trust2['roles'])
+        self.assertThat(role_id_set1, matchers.GreaterThan(role_id_set2))
+
+    def test_redelegate_with_role_by_name(self):
+        # For role by name testing
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user['id'],
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_names=[self.role['name']],
+            allow_redelegation=True)
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+        # Ensure we can get a token with this trust
+        trust_token = self._get_trust_token(trust)
+        # Chain second trust with roles subset
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user['id'],
+            project_id=self.project_id,
+            impersonation=True,
+            role_names=[self.role['name']],
+            allow_redelegation=True)
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': ref},
+                      token=trust_token)
+        trust = self.assertValidTrustResponse(r)
+        # Ensure we can get a token with this trust
+        self._get_trust_token(trust)
+
+    def test_redelegate_new_role_fails(self):
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': self.redelegated_trust_ref})
+        trust = self.assertValidTrustResponse(r)
+        trust_token = self._get_trust_token(trust)
+
+        # Build second trust with a role not in parent's roles
+        role = self.new_role_ref()
+        self.assignment_api.create_role(role['id'], role)
+        # assign a new role to the user
+        self.assignment_api.create_grant(role_id=role['id'],
+                                         user_id=self.user_id,
+                                         project_id=self.project_id)
+
+        # Try to chain a trust with the role not from parent trust
+        self.chained_trust_ref['roles'] = [{'id': role['id']}]
+
+        # Bypass policy enforcement
+        with mock.patch.object(rules, 'enforce', return_value=True):
+            self.post('/OS-TRUST/trusts',
+                      body={'trust': self.chained_trust_ref},
+                      token=trust_token,
+                      expected_status=403)
+
+    def test_redelegation_terminator(self):
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': self.redelegated_trust_ref})
+        trust = self.assertValidTrustResponse(r)
+        trust_token = self._get_trust_token(trust)
+
+        # Build second trust - the terminator
+        ref = dict(self.chained_trust_ref,
+                   redelegation_count=1,
+                   allow_redelegation=False)
+
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': ref},
+                      token=trust_token)
+
+        trust = self.assertValidTrustResponse(r)
+        # Check that allow_redelegation == False caused redelegation_count
+        # to be set to 0, and that allow_redelegation itself is not
+        # returned in the response
+        self.assertNotIn('allow_redelegation', trust)
+        self.assertEqual(0, trust['redelegation_count'])
+        trust_token = self._get_trust_token(trust)
+
+        # Build third trust, same as second
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': ref},
+                  token=trust_token,
+                  expected_status=403)
+
+
+class TestTrustChain(test_v3.RestfulTestCase):
+
+    def config_overrides(self):
+        super(TestTrustChain, self).config_overrides()
+        self.config_fixture.config(
+            group='trust',
+            enabled=True,
+            allow_redelegation=True,
+            max_redelegation_count=10
+        )
+
+    def setUp(self):
+        super(TestTrustChain, self).setUp()
+        # Create trust chain
+        self.user_chain = list()
+        self.trust_chain = list()
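+        # The chain built below: the trustor (self.user_id) delegates to
+        # user_chain[0]; each subsequent trust is created with the previous
+        # trustee's trust-scoped token, redelegating the same role to
+        # user_chain[1] and then user_chain[2].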
+        for _ in range(3):
+            user_ref = self.new_user_ref(domain_id=self.domain_id)
+            user = self.identity_api.create_user(user_ref)
+            user['password'] = user_ref['password']
+            self.user_chain.append(user)
+
+        # trustor->trustee
+        trustee = self.user_chain[0]
+        trust_ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=trustee['id'],
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+        trust_ref.update(
+            allow_redelegation=True,
+            redelegation_count=3)
+
+        r = self.post('/OS-TRUST/trusts',
+                      body={'trust': trust_ref})
+
+        trust = self.assertValidTrustResponse(r)
+        auth_data = self.build_authentication_request(
+            user_id=trustee['id'],
+            password=trustee['password'],
+            trust_id=trust['id'])
+        trust_token = self.get_requested_token(auth_data)
+        self.trust_chain.append(trust)
+
+        for trustee in self.user_chain[1:]:
+            trust_ref = self.new_trust_ref(
+                trustor_user_id=self.user_id,
+                trustee_user_id=trustee['id'],
+                project_id=self.project_id,
+                impersonation=True,
+                role_ids=[self.role_id])
+            trust_ref.update(
+                allow_redelegation=True)
+            r = self.post('/OS-TRUST/trusts',
+                          body={'trust': trust_ref},
+                          token=trust_token)
+            trust = self.assertValidTrustResponse(r)
+            auth_data = self.build_authentication_request(
+                user_id=trustee['id'],
+                password=trustee['password'],
+                trust_id=trust['id'])
+            trust_token = self.get_requested_token(auth_data)
+            self.trust_chain.append(trust)
+
+        trustee = self.user_chain[-1]
+        trust = self.trust_chain[-1]
+        auth_data = self.build_authentication_request(
+            user_id=trustee['id'],
+            password=trustee['password'],
+            trust_id=trust['id'])
+
+        self.last_token = self.get_requested_token(auth_data)
+
+    def assert_user_authenticate(self, user):
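+        """Sanity check: the user can still authenticate by password."""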
+        auth_data = self.build_authentication_request(
+            user_id=user['id'],
+            password=user['password']
+        )
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidTokenResponse(r)
+
+    def assert_trust_tokens_revoked(self, trust_id):
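+        """Assert a revocation event was emitted for ``trust_id``."""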
+        trustee = self.user_chain[0]
+        auth_data = self.build_authentication_request(
+            user_id=trustee['id'],
+            password=trustee['password']
+        )
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidTokenResponse(r)
+
+        revocation_response = self.get('/OS-REVOKE/events')
+        revocation_events = revocation_response.json_body['events']
+        found = False
+        for event in revocation_events:
+            if event.get('OS-TRUST:trust_id') == trust_id:
+                found = True
+        self.assertTrue(found, 'event with trust_id %s not found in list' %
+                        trust_id)
+
+    def test_delete_trust_cascade(self):
+        self.assert_user_authenticate(self.user_chain[0])
+        self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
+            'trust_id': self.trust_chain[0]['id']},
+            expected_status=204)
+
+        headers = {'X-Subject-Token': self.last_token}
+        self.head('/auth/tokens', headers=headers, expected_status=404)
+        self.assert_trust_tokens_revoked(self.trust_chain[0]['id'])
+
+    def test_delete_broken_chain(self):
+        self.assert_user_authenticate(self.user_chain[0])
+        self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
+            'trust_id': self.trust_chain[1]['id']},
+            expected_status=204)
+
+        self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
+            'trust_id': self.trust_chain[0]['id']},
+            expected_status=204)
+
+    def test_trustor_roles_revoked(self):
+        self.assert_user_authenticate(self.user_chain[0])
+
+        self.assignment_api.remove_role_from_user_and_project(
+            self.user_id, self.project_id, self.role_id
+        )
+
+        auth_data = self.build_authentication_request(
+            token=self.last_token,
+            trust_id=self.trust_chain[-1]['id'])
+        self.v3_authenticate_token(auth_data, expected_status=404)
+
+    def test_intermediate_user_disabled(self):
+        self.assert_user_authenticate(self.user_chain[0])
+
+        disabled = self.user_chain[0]
+        disabled['enabled'] = False
+        self.identity_api.update_user(disabled['id'], disabled)
+
+        # Bypass policy enforcement
+        with mock.patch.object(rules, 'enforce', return_value=True):
+            headers = {'X-Subject-Token': self.last_token}
+            self.head('/auth/tokens', headers=headers, expected_status=403)
+
+    def test_intermediate_user_deleted(self):
+        self.assert_user_authenticate(self.user_chain[0])
+
+        self.identity_api.delete_user(self.user_chain[0]['id'])
+
+        # Bypass policy enforcement
+        with mock.patch.object(rules, 'enforce', return_value=True):
+            headers = {'X-Subject-Token': self.last_token}
+            self.head('/auth/tokens', headers=headers, expected_status=403)
+
+
+class TestTrustAuth(test_v3.RestfulTestCase):
+    EXTENSION_NAME = 'revoke'
+    EXTENSION_TO_ADD = 'revoke_extension'
+
+    def config_overrides(self):
+        super(TestTrustAuth, self).config_overrides()
+        self.config_fixture.config(
+            group='revoke',
+            driver='keystone.contrib.revoke.backends.kvs.Revoke')
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.pki.Provider',
+            revoke_by_id=False)
+        self.config_fixture.config(group='trust', enabled=True)
+
+    def setUp(self):
+        super(TestTrustAuth, self).setUp()
+
+        # create a trustee to delegate roles to
+        self.trustee_user = self.new_user_ref(domain_id=self.domain_id)
+        password = self.trustee_user['password']
+        self.trustee_user = self.identity_api.create_user(self.trustee_user)
+        self.trustee_user['password'] = password
+        self.trustee_user_id = self.trustee_user['id']
+
+    def test_create_trust_400(self):
+        # The server returns a 403 Forbidden rather than a 400, see bug 1133435
+        self.post('/OS-TRUST/trusts', body={'trust': {}}, expected_status=403)
+
+    def test_create_unscoped_trust(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id)
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        self.assertValidTrustResponse(r, ref)
+
+    def test_create_trust_no_roles(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id)
+        self.post('/OS-TRUST/trusts', body={'trust': ref}, expected_status=403)
+
+    def _initialize_test_consume_trust(self, count):
+        """Create a trust with ``count`` remaining uses and consume one.
+
+        Callers verify that ``remaining_uses`` is decremented as the trust
+        is consumed.
+        """
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            remaining_uses=count,
+            role_ids=[self.role_id])
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        # make sure the trust exists
+        trust = self.assertValidTrustResponse(r, ref)
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            expected_status=200)
+        # get a token for the trustee
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        token = r.headers.get('X-Subject-Token')
+        # get a trust token, consume one use
+        auth_data = self.build_authentication_request(
+            token=token,
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        return trust
+
+    def test_consume_trust_once(self):
+        trust = self._initialize_test_consume_trust(2)
+        # check decremented value
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            expected_status=200)
+        trust = r.result.get('trust')
+        self.assertIsNotNone(trust)
+        self.assertEqual(1, trust['remaining_uses'])
+
+    def test_create_one_time_use_trust(self):
+        trust = self._initialize_test_consume_trust(1)
+        # No uses remain; the trust becomes unavailable
+        self.get(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            expected_status=404)
+        # this time we can't get a trust token
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_create_trust_with_bad_values_for_remaining_uses(self):
+        # negative values for the remaining_uses parameter are forbidden
+        self._create_trust_with_bad_remaining_use(bad_value=-1)
+        # 0 is a forbidden value as well
+        self._create_trust_with_bad_remaining_use(bad_value=0)
+        # as are non-integer values
+        self._create_trust_with_bad_remaining_use(bad_value="a bad value")
+        self._create_trust_with_bad_remaining_use(bad_value=7.2)
+
+    def _create_trust_with_bad_remaining_use(self, bad_value):
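+        """Create a trust with an invalid remaining_uses; expect a 400."""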
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            remaining_uses=bad_value,
+            role_ids=[self.role_id])
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': ref},
+                  expected_status=400)
+
+    def test_invalid_trust_request_without_impersonation(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            role_ids=[self.role_id])
+
+        del ref['impersonation']
+
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': ref},
+                  expected_status=400)
+
+    def test_invalid_trust_request_without_trustee(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            role_ids=[self.role_id])
+
+        del ref['trustee_user_id']
+
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': ref},
+                  expected_status=400)
+
+    def test_create_unlimited_use_trust(self):
+        # By default, trusts are unlimited in the number of tokens that can
+        # be generated from them; this test creates such a trust explicitly.
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            remaining_uses=None,
+            role_ids=[self.role_id])
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r, ref)
+
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            expected_status=200)
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'])
+        r = self.v3_authenticate_token(auth_data)
+        token = r.headers.get('X-Subject-Token')
+        auth_data = self.build_authentication_request(
+            token=token,
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            expected_status=200)
+        trust = r.result.get('trust')
+        self.assertIsNone(trust['remaining_uses'])
+
+    def test_trust_crud(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            role_ids=[self.role_id])
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r, ref)
+
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            expected_status=200)
+        self.assertValidTrustResponse(r, ref)
+
+        # validate roles on the trust
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s/roles' % {
+                'trust_id': trust['id']},
+            expected_status=200)
+        roles = self.assertValidRoleListResponse(r, self.role)
+        self.assertIn(self.role['id'], [x['id'] for x in roles])
+        self.head(
+            '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
+                'trust_id': trust['id'],
+                'role_id': self.role['id']},
+            expected_status=200)
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
+                'trust_id': trust['id'],
+                'role_id': self.role['id']},
+            expected_status=200)
+        self.assertValidRoleResponse(r, self.role)
+
+        r = self.get('/OS-TRUST/trusts', expected_status=200)
+        self.assertValidTrustListResponse(r, trust)
+
+        # trusts are immutable
+        self.patch(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            body={'trust': ref},
+            expected_status=404)
+
+        self.delete(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            expected_status=204)
+
+        self.get(
+            '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+            expected_status=404)
+
+    def test_create_trust_trustee_404(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=uuid.uuid4().hex,
+            project_id=self.project_id,
+            role_ids=[self.role_id])
+        self.post('/OS-TRUST/trusts', body={'trust': ref}, expected_status=404)
+
+    def test_create_trust_trustor_trustee_backwards(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.trustee_user_id,
+            trustee_user_id=self.user_id,
+            project_id=self.project_id,
+            role_ids=[self.role_id])
+        self.post('/OS-TRUST/trusts', body={'trust': ref}, expected_status=403)
+
+    def test_create_trust_project_404(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=uuid.uuid4().hex,
+            role_ids=[self.role_id])
+        self.post('/OS-TRUST/trusts', body={'trust': ref}, expected_status=404)
+
+    def test_create_trust_role_id_404(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            role_ids=[uuid.uuid4().hex])
+        self.post('/OS-TRUST/trusts', body={'trust': ref}, expected_status=404)
+
+    def test_create_trust_role_name_404(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            role_names=[uuid.uuid4().hex])
+        self.post('/OS-TRUST/trusts', body={'trust': ref}, expected_status=404)
+
+    def test_create_expired_trust(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            expires=dict(seconds=-1),
+            role_ids=[self.role_id])
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r, ref)
+
+        self.get('/OS-TRUST/trusts/%(trust_id)s' % {
+            'trust_id': trust['id']},
+            expected_status=404)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_v3_v2_intermix_trustor_not_in_default_domain_failed(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.default_domain_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectTrustScopedTokenResponse(
+            r, self.default_domain_user)
+
+        token = r.headers.get('X-Subject-Token')
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % token
+        self.admin_request(
+            path=path, token='ADMIN', method='GET', expected_status=401)
+
+    def test_v3_v2_intermix_trustee_not_in_default_domain_failed(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.default_domain_user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.default_domain_project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            project_id=self.default_domain_project_id)
+        token = self.get_requested_token(auth_data)
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectTrustScopedTokenResponse(
+            r, self.trustee_user)
+        token = r.headers.get('X-Subject-Token')
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % token
+        self.admin_request(
+            path=path, token='ADMIN', method='GET', expected_status=401)
+
+    def test_v3_v2_intermix_project_not_in_default_domain_failed(self):
+        # create a trustee in the default domain to delegate roles to
+        trustee_user = self.new_user_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID)
+        password = trustee_user['password']
+        trustee_user = self.identity_api.create_user(trustee_user)
+        trustee_user['password'] = password
+        trustee_user_id = trustee_user['id']
+
+        ref = self.new_trust_ref(
+            trustor_user_id=self.default_domain_user_id,
+            trustee_user_id=trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            project_id=self.default_domain_project_id)
+        token = self.get_requested_token(auth_data)
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=trustee_user['id'],
+            password=trustee_user['password'],
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectTrustScopedTokenResponse(
+            r, trustee_user)
+        token = r.headers.get('X-Subject-Token')
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % token
+        self.admin_request(
+            path=path, token='ADMIN', method='GET', expected_status=401)
+
+    def test_v3_v2_intermix(self):
+        # create a trustee in the default domain to delegate roles to
+        trustee_user = self.new_user_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID)
+        password = trustee_user['password']
+        trustee_user = self.identity_api.create_user(trustee_user)
+        trustee_user['password'] = password
+        trustee_user_id = trustee_user['id']
+
+        ref = self.new_trust_ref(
+            trustor_user_id=self.default_domain_user_id,
+            trustee_user_id=trustee_user_id,
+            project_id=self.default_domain_project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            project_id=self.default_domain_project_id)
+        token = self.get_requested_token(auth_data)
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=trustee_user['id'],
+            password=trustee_user['password'],
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectTrustScopedTokenResponse(
+            r, trustee_user)
+        token = r.headers.get('X-Subject-Token')
+
+        # now validate the v3 token with v2 API
+        path = '/v2.0/tokens/%s' % token
+        self.admin_request(
+            path=path, token='ADMIN', method='GET', expected_status=200)
+
+    def test_exercise_trust_scoped_token_without_impersonation(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectTrustScopedTokenResponse(r, self.trustee_user)
+        self.assertEqual(r.result['token']['user']['id'],
+                         self.trustee_user['id'])
+        self.assertEqual(r.result['token']['user']['name'],
+                         self.trustee_user['name'])
+        self.assertEqual(r.result['token']['user']['domain']['id'],
+                         self.domain['id'])
+        self.assertEqual(r.result['token']['user']['domain']['name'],
+                         self.domain['name'])
+        self.assertEqual(r.result['token']['project']['id'],
+                         self.project['id'])
+        self.assertEqual(r.result['token']['project']['name'],
+                         self.project['name'])
+
+    def test_exercise_trust_scoped_token_with_impersonation(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectTrustScopedTokenResponse(r, self.user)
+        self.assertEqual(r.result['token']['user']['id'], self.user['id'])
+        self.assertEqual(r.result['token']['user']['name'], self.user['name'])
+        self.assertEqual(r.result['token']['user']['domain']['id'],
+                         self.domain['id'])
+        self.assertEqual(r.result['token']['user']['domain']['name'],
+                         self.domain['name'])
+        self.assertEqual(r.result['token']['project']['id'],
+                         self.project['id'])
+        self.assertEqual(r.result['token']['project']['name'],
+                         self.project['name'])
+
+    def test_impersonation_token_cannot_create_new_trust(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+
+        trust_token = self.get_requested_token(auth_data)
+
+        # Build second trust
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': ref},
+                  token=trust_token,
+                  expected_status=403)
+
+    def test_trust_deleted_grant(self):
+        # create a new role
+        role = self.new_role_ref()
+        self.role_api.create_role(role['id'], role)
+
+        grant_url = (
+            '/projects/%(project_id)s/users/%(user_id)s/'
+            'roles/%(role_id)s' % {
+                'project_id': self.project_id,
+                'user_id': self.user_id,
+                'role_id': role['id']})
+
+        # assign a new role
+        self.put(grant_url)
+
+        # create a trust that delegates the new role
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[role['id']])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        # delete the grant
+        self.delete(grant_url)
+
+        # attempt to get a trust token with the deleted grant
+        # and ensure it's unauthorized
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        self.v3_authenticate_token(auth_data, expected_status=403)
+
+    def test_trust_chained(self):
+        """Test that a trust token can't be used to execute another trust.
+
+        To do this, we create an A->B->C hierarchy of trusts, then attempt to
+        execute the trusts in series (C->B->A).
+
+        """
+        # create a sub-trustee user
+        sub_trustee_user = self.new_user_ref(
+            domain_id=test_v3.DEFAULT_DOMAIN_ID)
+        password = sub_trustee_user['password']
+        sub_trustee_user = self.identity_api.create_user(sub_trustee_user)
+        sub_trustee_user['password'] = password
+        sub_trustee_user_id = sub_trustee_user['id']
+
+        # create a new role
+        role = self.new_role_ref()
+        self.role_api.create_role(role['id'], role)
+
+        # assign the new role to trustee
+        self.put(
+            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
+                'project_id': self.project_id,
+                'user_id': self.trustee_user_id,
+                'role_id': role['id']})
+
+        # create a trust from trustor -> trustee
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust1 = self.assertValidTrustResponse(r)
+
+        # authenticate as trustee so we can create a second trust
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user_id,
+            password=self.trustee_user['password'],
+            project_id=self.project_id)
+        token = self.get_requested_token(auth_data)
+
+        # create a trust from trustee -> sub-trustee
+        ref = self.new_trust_ref(
+            trustor_user_id=self.trustee_user_id,
+            trustee_user_id=sub_trustee_user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[role['id']])
+        r = self.post('/OS-TRUST/trusts', token=token, body={'trust': ref})
+        trust2 = self.assertValidTrustResponse(r)
+
+        # authenticate as sub-trustee and get a trust token
+        auth_data = self.build_authentication_request(
+            user_id=sub_trustee_user['id'],
+            password=sub_trustee_user['password'],
+            trust_id=trust2['id'])
+        trust_token = self.get_requested_token(auth_data)
+
+        # attempt to get the second trust using a trust token
+        auth_data = self.build_authentication_request(
+            token=trust_token,
+            trust_id=trust1['id'])
+        self.v3_authenticate_token(auth_data, expected_status=403)
+
+    def assertTrustTokensRevoked(self, trust_id):
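+        """Assert a revocation event was emitted for ``trust_id``."""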
+        revocation_response = self.get('/OS-REVOKE/events',
+                                       expected_status=200)
+        revocation_events = revocation_response.json_body['events']
+        found = False
+        for event in revocation_events:
+            if event.get('OS-TRUST:trust_id') == trust_id:
+                found = True
+        self.assertTrue(found, 'event with trust_id %s not found in list' %
+                        trust_id)
+
+    def test_delete_trust_revokes_tokens(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+        trust_id = trust['id']
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust_id)
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectTrustScopedTokenResponse(
+            r, self.trustee_user)
+        trust_token = r.headers['X-Subject-Token']
+        self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
+            'trust_id': trust_id},
+            expected_status=204)
+        headers = {'X-Subject-Token': trust_token}
+        self.head('/auth/tokens', headers=headers, expected_status=404)
+        self.assertTrustTokensRevoked(trust_id)
+
+    def disable_user(self, user):
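+        """Disable the given user via the identity API."""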
+        user['enabled'] = False
+        self.identity_api.update_user(user['id'], user)
+
+    def test_trust_get_token_fails_if_trustor_disabled(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+
+        trust = self.assertValidTrustResponse(r, ref)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        self.v3_authenticate_token(auth_data, expected_status=201)
+
+        self.disable_user(self.user)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        self.v3_authenticate_token(auth_data, expected_status=403)
+
+    def test_trust_get_token_fails_if_trustee_disabled(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+
+        trust = self.assertValidTrustResponse(r, ref)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        self.v3_authenticate_token(auth_data, expected_status=201)
+
+        self.disable_user(self.trustee_user)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_delete_trust(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+
+        trust = self.assertValidTrustResponse(r, ref)
+
+        self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
+            'trust_id': trust['id']},
+            expected_status=204)
+
+        self.get('/OS-TRUST/trusts/%(trust_id)s' % {
+            'trust_id': trust['id']},
+            expected_status=404)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        self.v3_authenticate_token(auth_data, expected_status=401)
+
+    def test_list_trusts(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        for _ in range(3):
+            r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+            self.assertValidTrustResponse(r, ref)
+
+        r = self.get('/OS-TRUST/trusts', expected_status=200)
+        trusts = r.result['trusts']
+        self.assertEqual(3, len(trusts))
+        self.assertValidTrustListResponse(r)
+
+        r = self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
+                     self.user_id, expected_status=200)
+        trusts = r.result['trusts']
+        self.assertEqual(3, len(trusts))
+        self.assertValidTrustListResponse(r)
+
+        r = self.get('/OS-TRUST/trusts?trustee_user_id=%s' %
+                     self.user_id, expected_status=200)
+        trusts = r.result['trusts']
+        self.assertEqual(0, len(trusts))
+
+    def test_change_password_invalidates_trust_tokens(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+
+        self.assertValidProjectTrustScopedTokenResponse(r, self.user)
+        trust_token = r.headers.get('X-Subject-Token')
+
+        self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
+                 self.user_id, expected_status=200,
+                 token=trust_token)
+
+        self.assertValidUserResponse(
+            self.patch('/users/%s' % self.trustee_user['id'],
+                       body={'user': {'password': uuid.uuid4().hex}},
+                       expected_status=200))
+
+        self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
+                 self.user_id, expected_status=401,
+                 token=trust_token)
+
+    def test_trustee_can_do_role_ops(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            role_ids=[self.role_id])
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'])
+
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s/roles' % {
+                'trust_id': trust['id']},
+            auth=auth_data)
+        self.assertValidRoleListResponse(r, self.role)
+
+        self.head(
+            '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
+                'trust_id': trust['id'],
+                'role_id': self.role['id']},
+            auth=auth_data,
+            expected_status=200)
+
+        r = self.get(
+            '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
+                'trust_id': trust['id'],
+                'role_id': self.role['id']},
+            auth=auth_data,
+            expected_status=200)
+        self.assertValidRoleResponse(r, self.role)
+
+    def test_do_not_consume_remaining_uses_when_get_token_fails(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=False,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id],
+            remaining_uses=3)
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+
+        new_trust = r.result.get('trust')
+        trust_id = new_trust.get('id')
+        # Authenticate as a user who is not the trustee: the token request
+        # must fail, and the trust's remaining_uses must not be
+        # decremented.
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            trust_id=trust_id)
+        self.v3_authenticate_token(auth_data, expected_status=403)
+
+        r = self.get('/OS-TRUST/trusts/%s' % trust_id)
+        self.assertEqual(3, r.result.get('trust').get('remaining_uses'))
+
+
+class TestAPIProtectionWithoutAuthContextMiddleware(test_v3.RestfulTestCase):
+    def test_api_protection_with_no_auth_context_in_env(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.default_domain_user['id'],
+            password=self.default_domain_user['password'],
+            project_id=self.project['id'])
+        token = self.get_requested_token(auth_data)
+        auth_controller = auth.controllers.Auth()
+        # All we care about is that the auth context is not in the
+        # environment, so 'token_id' is used to build the auth context
+        # instead.
+        context = {'subject_token_id': token,
+                   'token_id': token,
+                   'query_string': {},
+                   'environment': {}}
+        r = auth_controller.validate_token(context)
+        self.assertEqual(200, r.status_code)
+
+
+class TestAuthContext(tests.TestCase):
+    def setUp(self):
+        super(TestAuthContext, self).setUp()
+        self.auth_context = auth.controllers.AuthContext()
+
+    def test_pick_lowest_expires_at(self):
+        expires_at_1 = timeutils.isotime(timeutils.utcnow())
+        expires_at_2 = timeutils.isotime(timeutils.utcnow() +
+                                         datetime.timedelta(seconds=10))
+        # make sure auth_context picks the lowest value
+        self.auth_context['expires_at'] = expires_at_1
+        self.auth_context['expires_at'] = expires_at_2
+        self.assertEqual(expires_at_1, self.auth_context['expires_at'])
+
+    def test_identity_attribute_conflict(self):
+        for identity_attr in auth.controllers.AuthContext.IDENTITY_ATTRIBUTES:
+            self.auth_context[identity_attr] = uuid.uuid4().hex
+            if identity_attr == 'expires_at':
+                # 'expires_at' is a special case. Will test it in a separate
+                # test case.
+                continue
+            self.assertRaises(exception.Unauthorized,
+                              operator.setitem,
+                              self.auth_context,
+                              identity_attr,
+                              uuid.uuid4().hex)
+
+    def test_identity_attribute_conflict_with_none_value(self):
+        for identity_attr in auth.controllers.AuthContext.IDENTITY_ATTRIBUTES:
+            self.auth_context[identity_attr] = None
+
+            if identity_attr == 'expires_at':
+                # 'expires_at' is a special case and is tested above.
+                self.auth_context['expires_at'] = uuid.uuid4().hex
+                continue
+
+            self.assertRaises(exception.Unauthorized,
+                              operator.setitem,
+                              self.auth_context,
+                              identity_attr,
+                              uuid.uuid4().hex)
+
+    def test_non_identity_attribute_conflict_override(self):
+        # for attributes Keystone doesn't know about, make sure they can be
+        # freely manipulated
+        attr_name = uuid.uuid4().hex
+        attr_val_1 = uuid.uuid4().hex
+        attr_val_2 = uuid.uuid4().hex
+        self.auth_context[attr_name] = attr_val_1
+        self.auth_context[attr_name] = attr_val_2
+        self.assertEqual(attr_val_2, self.auth_context[attr_name])
+
+
+class TestAuthSpecificData(test_v3.RestfulTestCase):
+
+    def test_get_catalog_project_scoped_token(self):
+        """Call ``GET /auth/catalog`` with a project-scoped token."""
+        r = self.get(
+            '/auth/catalog',
+            expected_status=200)
+        self.assertValidCatalogResponse(r)
+
+    def test_get_catalog_domain_scoped_token(self):
+        """Call ``GET /auth/catalog`` with a domain-scoped token."""
+        # grant a domain role to a user
+        self.put(path='/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id']))
+
+        self.get(
+            '/auth/catalog',
+            auth=self.build_authentication_request(
+                user_id=self.user['id'],
+                password=self.user['password'],
+                domain_id=self.domain['id']),
+            expected_status=403)
+
+    def test_get_catalog_unscoped_token(self):
+        """Call ``GET /auth/catalog`` with an unscoped token."""
+        self.get(
+            '/auth/catalog',
+            auth=self.build_authentication_request(
+                user_id=self.default_domain_user['id'],
+                password=self.default_domain_user['password']),
+            expected_status=403)
+
+    def test_get_catalog_no_token(self):
+        """Call ``GET /auth/catalog`` without a token."""
+        self.get(
+            '/auth/catalog',
+            noauth=True,
+            expected_status=401)
+
+    def test_get_projects_project_scoped_token(self):
+        r = self.get('/auth/projects', expected_status=200)
+        self.assertThat(r.json['projects'], matchers.HasLength(1))
+        self.assertValidProjectListResponse(r)
+
+    def test_get_domains_project_scoped_token(self):
+        self.put(path='/domains/%s/users/%s/roles/%s' % (
+            self.domain['id'], self.user['id'], self.role['id']))
+
+        r = self.get('/auth/domains', expected_status=200)
+        self.assertThat(r.json['domains'], matchers.HasLength(1))
+        self.assertValidDomainListResponse(r)
+
+
+class TestFernetTokenProvider(test_v3.RestfulTestCase):
+    def setUp(self):
+        super(TestFernetTokenProvider, self).setUp()
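+        # The Fernet provider encrypts token payloads with symmetric keys,
+        # so stand up a key repository fixture before issuing any tokens.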
+        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
+    def _make_auth_request(self, auth_data):
+        resp = self.post('/auth/tokens', body=auth_data, expected_status=201)
+        token = resp.headers.get('X-Subject-Token')
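+        # Fernet tokens are expected to stay under 255 characters, small
+        # enough for typical HTTP header and database column limits.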
+        self.assertLess(len(token), 255)
+        return token
+
+    def _get_unscoped_token(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'])
+        return self._make_auth_request(auth_data)
+
+    def _get_project_scoped_token(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project_id)
+        return self._make_auth_request(auth_data)
+
+    def _get_domain_scoped_token(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            domain_id=self.domain_id)
+        return self._make_auth_request(auth_data)
+
+    def _get_trust_scoped_token(self, trustee_user, trust):
+        auth_data = self.build_authentication_request(
+            user_id=trustee_user['id'],
+            password=trustee_user['password'],
+            trust_id=trust['id'])
+        return self._make_auth_request(auth_data)
+
+    def _validate_token(self, token, expected_status=200):
+        return self.get(
+            '/auth/tokens',
+            headers={'X-Subject-Token': token},
+            expected_status=expected_status)
+
+    def _revoke_token(self, token, expected_status=204):
+        return self.delete(
+            '/auth/tokens',
+            headers={'X-Subject-Token': token},
+            expected_status=expected_status)
+
+    def _set_user_enabled(self, user, enabled=True):
+        user['enabled'] = enabled
+        self.identity_api.update_user(user['id'], user)
+
+    def _create_trust(self):
+        # Create a trustee user
+        trustee_user_ref = self.new_user_ref(domain_id=self.domain_id)
+        trustee_user = self.identity_api.create_user(trustee_user_ref)
+        trustee_user['password'] = trustee_user_ref['password']
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=trustee_user['id'],
+            project_id=self.project_id,
+            impersonation=True,
+            role_ids=[self.role_id])
+
+        # Create a trust
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+        return (trustee_user, trust)
+
+    def config_overrides(self):
+        super(TestFernetTokenProvider, self).config_overrides()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.fernet.Provider')
+
+    def test_validate_unscoped_token(self):
+        unscoped_token = self._get_unscoped_token()
+        self._validate_token(unscoped_token)
+
+    def test_validate_tampered_unscoped_token_fails(self):
+        unscoped_token = self._get_unscoped_token()
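+        # Overwrite 32 characters in the middle of the token with random
+        # hex; validating the mangled token should then fail.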
+        tampered_token = (unscoped_token[:50] + uuid.uuid4().hex +
+                          unscoped_token[50 + 32:])
+        self._validate_token(tampered_token, expected_status=401)
+
+    def test_revoke_unscoped_token(self):
+        unscoped_token = self._get_unscoped_token()
+        self._validate_token(unscoped_token)
+        self._revoke_token(unscoped_token)
+        self._validate_token(unscoped_token, expected_status=404)
+
+    def test_unscoped_token_is_invalid_after_disabling_user(self):
+        unscoped_token = self._get_unscoped_token()
+        # Make sure the token is valid
+        self._validate_token(unscoped_token)
+        # Disable the user
+        self._set_user_enabled(self.user, enabled=False)
+        # Ensure validating a token for a disabled user fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          unscoped_token)
+
+    def test_unscoped_token_is_invalid_after_enabling_disabled_user(self):
+        unscoped_token = self._get_unscoped_token()
+        # Make sure the token is valid
+        self._validate_token(unscoped_token)
+        # Disable the user
+        self._set_user_enabled(self.user, enabled=False)
+        # Ensure validating a token for a disabled user fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          unscoped_token)
+        # Enable the user
+        self._set_user_enabled(self.user)
+        # Ensure the token remains invalid even after re-enabling the user
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          unscoped_token)
+
+    def test_unscoped_token_is_invalid_after_disabling_user_domain(self):
+        unscoped_token = self._get_unscoped_token()
+        # Make sure the token is valid
+        self._validate_token(unscoped_token)
+        # Disable the user's domain
+        self.domain['enabled'] = False
+        self.resource_api.update_domain(self.domain['id'], self.domain)
+        # Ensure validating a token for a user in a disabled domain fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          unscoped_token)
+
+    def test_unscoped_token_is_invalid_after_changing_user_password(self):
+        unscoped_token = self._get_unscoped_token()
+        # Make sure the token is valid
+        self._validate_token(unscoped_token)
+        # Change user's password
+        self.user['password'] = 'Password1'
+        self.identity_api.update_user(self.user['id'], self.user)
+        # Ensure updating the user's password revokes the existing tokens
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          unscoped_token)
+
+    def test_validate_project_scoped_token(self):
+        project_scoped_token = self._get_project_scoped_token()
+        self._validate_token(project_scoped_token)
+
+    def test_validate_domain_scoped_token(self):
+        # Grant user access to domain
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user['id'],
+                                         domain_id=self.domain['id'])
+        domain_scoped_token = self._get_domain_scoped_token()
+        resp = self._validate_token(domain_scoped_token)
+        resp_json = json.loads(resp.body)
+        self.assertIsNotNone(resp_json['token']['catalog'])
+        self.assertIsNotNone(resp_json['token']['roles'])
+        self.assertIsNotNone(resp_json['token']['domain'])
+
+    def test_validate_tampered_project_scoped_token_fails(self):
+        project_scoped_token = self._get_project_scoped_token()
+        tampered_token = (project_scoped_token[:50] + uuid.uuid4().hex +
+                          project_scoped_token[50 + 32:])
+        self._validate_token(tampered_token, expected_status=401)
+
+    def test_revoke_project_scoped_token(self):
+        project_scoped_token = self._get_project_scoped_token()
+        self._validate_token(project_scoped_token)
+        self._revoke_token(project_scoped_token)
+        self._validate_token(project_scoped_token, expected_status=404)
+
+    def test_project_scoped_token_is_invalid_after_disabling_user(self):
+        project_scoped_token = self._get_project_scoped_token()
+        # Make sure the token is valid
+        self._validate_token(project_scoped_token)
+        # Disable the user
+        self._set_user_enabled(self.user, enabled=False)
+        # Ensure validating a token for a disabled user fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          project_scoped_token)
+
+    def test_domain_scoped_token_is_invalid_after_disabling_user(self):
+        # Grant user access to domain
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user['id'],
+                                         domain_id=self.domain['id'])
+        domain_scoped_token = self._get_domain_scoped_token()
+        # Make sure the token is valid
+        self._validate_token(domain_scoped_token)
+        # Disable user
+        self._set_user_enabled(self.user, enabled=False)
+        # Ensure validating a token for a disabled user fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          domain_scoped_token)
+
+    def test_domain_scoped_token_is_invalid_after_deleting_grant(self):
+        # Grant user access to domain
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user['id'],
+                                         domain_id=self.domain['id'])
+        domain_scoped_token = self._get_domain_scoped_token()
+        # Make sure the token is valid
+        self._validate_token(domain_scoped_token)
+        # Delete access to domain
+        self.assignment_api.delete_grant(self.role['id'],
+                                         user_id=self.user['id'],
+                                         domain_id=self.domain['id'])
+        # Ensure validating a token after the grant is deleted fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          domain_scoped_token)
+
+    def test_project_scoped_token_invalid_after_changing_user_password(self):
+        project_scoped_token = self._get_project_scoped_token()
+        # Make sure the token is valid
+        self._validate_token(project_scoped_token)
+        # Update user's password
+        self.user['password'] = 'Password1'
+        self.identity_api.update_user(self.user['id'], self.user)
+        # Ensure updating user's password revokes existing tokens
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          project_scoped_token)
+
+    def test_project_scoped_token_invalid_after_disabling_project(self):
+        project_scoped_token = self._get_project_scoped_token()
+        # Make sure the token is valid
+        self._validate_token(project_scoped_token)
+        # Disable project
+        self.project['enabled'] = False
+        self.resource_api.update_project(self.project['id'], self.project)
+        # Ensure validating a token for a disabled project fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          project_scoped_token)
+
+    def test_domain_scoped_token_invalid_after_disabling_domain(self):
+        # Grant user access to domain
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user['id'],
+                                         domain_id=self.domain['id'])
+        domain_scoped_token = self._get_domain_scoped_token()
+        # Make sure the token is valid
+        self._validate_token(domain_scoped_token)
+        # Disable domain
+        self.domain['enabled'] = False
+        self.resource_api.update_domain(self.domain['id'], self.domain)
+        # Ensure validating a token for a disabled domain fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          domain_scoped_token)
+
+    def test_rescope_unscoped_token_with_trust(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        self.assertLess(len(trust_scoped_token), 255)
+
+    def test_validate_a_trust_scoped_token(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        # Validate a trust scoped token
+        self._validate_token(trust_scoped_token)
+
+    def test_validate_tampered_trust_scoped_token_fails(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        # Tamper with the trust scoped token
+        tampered_token = (trust_scoped_token[:50] + uuid.uuid4().hex +
+                          trust_scoped_token[50 + 32:])
+        self._validate_token(tampered_token, expected_status=401)
+
+    def test_revoke_trust_scoped_token(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        # Validate a trust scoped token
+        self._validate_token(trust_scoped_token)
+        self._revoke_token(trust_scoped_token)
+        self._validate_token(trust_scoped_token, expected_status=404)
+
+    def test_trust_scoped_token_is_invalid_after_disabling_trustee(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        # Validate a trust scoped token
+        self._validate_token(trust_scoped_token)
+
+        # Disable trustee
+        trustee_update_ref = dict(enabled=False)
+        self.identity_api.update_user(trustee_user['id'], trustee_update_ref)
+        # Ensure validating a token for a disabled trustee fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          trust_scoped_token)
+
+    def test_trust_scoped_token_invalid_after_changing_trustee_password(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        # Validate a trust scoped token
+        self._validate_token(trust_scoped_token)
+        # Change trustee's password
+        trustee_update_ref = dict(password='Password1')
+        self.identity_api.update_user(trustee_user['id'], trustee_update_ref)
+        # Ensure updating trustee's password revokes existing tokens
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          trust_scoped_token)
+
+    def test_trust_scoped_token_is_invalid_after_disabling_trustor(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        # Validate a trust scoped token
+        self._validate_token(trust_scoped_token)
+
+        # Disable the trustor
+        trustor_update_ref = dict(enabled=False)
+        self.identity_api.update_user(self.user['id'], trustor_update_ref)
+        # Ensure validating a token for a disabled trustor fails
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          trust_scoped_token)
+
+    def test_trust_scoped_token_invalid_after_changing_trustor_password(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        # Validate a trust scoped token
+        self._validate_token(trust_scoped_token)
+
+        # Change trustor's password
+        trustor_update_ref = dict(password='Password1')
+        self.identity_api.update_user(self.user['id'], trustor_update_ref)
+        # Ensure updating the trustor's password revokes existing trust tokens
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          trust_scoped_token)
+
+    def test_trust_scoped_token_invalid_after_disabled_trustor_domain(self):
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        # Validate a trust scoped token
+        self._validate_token(trust_scoped_token)
+
+        # Disable trustor's domain
+        self.domain['enabled'] = False
+        self.resource_api.update_domain(self.domain['id'], self.domain)
+
+        trustor_update_ref = dict(password='Password1')
+        self.identity_api.update_user(self.user['id'], trustor_update_ref)
+        # Ensure the trust scoped token is no longer valid
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api.validate_token,
+                          trust_scoped_token)
+
+    def test_v2_validate_unscoped_token_returns_401(self):
+        """Test raised exception when validating unscoped token.
+
+        Test that validating an unscoped token in v2.0 for a v3 user from
+        a non-default domain returns unauthorized.
+        """
+        unscoped_token = self._get_unscoped_token()
+        self.assertRaises(exception.Unauthorized,
+                          self.token_provider_api.validate_v2_token,
+                          unscoped_token)
+
+    def test_v2_validate_domain_scoped_token_returns_401(self):
+        """Test raised exception when validating a domain scoped token.
+
+        Test that validating a domain scoped token in v2.0
+        returns unauthorized.
+        """
+
+        # Grant user access to domain
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user['id'],
+                                         domain_id=self.domain['id'])
+
+        scoped_token = self._get_domain_scoped_token()
+        self.assertRaises(exception.Unauthorized,
+                          self.token_provider_api.validate_v2_token,
+                          scoped_token)
+
+    def test_v2_validate_trust_scoped_token(self):
+        """Test raised exception when validating a trust scoped token.
+
+        Test that validating a trust scoped token in v2.0 returns
+        unauthorized.
+        """
+
+        trustee_user, trust = self._create_trust()
+        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+        self.assertRaises(exception.Unauthorized,
+                          self.token_provider_api.validate_v2_token,
+                          trust_scoped_token)
+
+
+class TestAuthFernetTokenProvider(TestAuth):
+    def setUp(self):
+        super(TestAuthFernetTokenProvider, self).setUp()
+        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
+    def config_overrides(self):
+        super(TestAuthFernetTokenProvider, self).config_overrides()
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.fernet.Provider')
+
+    def test_verify_with_bound_token(self):
+        self.config_fixture.config(group='token', bind='kerberos')
+        auth_data = self.build_authentication_request(
+            project_id=self.project['id'])
+        remote_user = self.default_domain_user['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
+        # Bind is not currently supported by Fernet, see bug 1433311.
+        self.v3_authenticate_token(auth_data, expected_status=501)
+
+    def test_v2_v3_bind_token_intermix(self):
+        self.config_fixture.config(group='token', bind='kerberos')
+
+        # we need our own user registered to the default domain because of
+        # the way external auth works.
+        remote_user = self.default_domain_user['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
+        body = {'auth': {}}
+        # Bind is not currently supported by Fernet, see bug 1433311.
+        self.admin_request(path='/v2.0/tokens',
+                           method='POST',
+                           body=body,
+                           expected_status=501)
+
+    def test_auth_with_bind_token(self):
+        self.config_fixture.config(group='token', bind=['kerberos'])
+
+        auth_data = self.build_authentication_request()
+        remote_user = self.default_domain_user['name']
+        self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+                                             'AUTH_TYPE': 'Negotiate'})
+        # Bind is not currently supported by Fernet, see bug 1433311.
+        self.v3_authenticate_token(auth_data, expected_status=501)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_catalog.py b/keystone-moon/keystone/tests/unit/test_v3_catalog.py
new file mode 100644 (file)
index 0000000..d231b2e
--- /dev/null
@@ -0,0 +1,746 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import uuid
+
+from keystone import catalog
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit import test_v3
+
+
+class CatalogTestCase(test_v3.RestfulTestCase):
+    """Test service & endpoint CRUD."""
+
+    # region crud tests
+
+    def test_create_region_with_id(self):
+        """Call ``PUT /regions/{region_id}`` w/o an ID in the request body."""
+        ref = self.new_region_ref()
+        region_id = ref.pop('id')
+        r = self.put(
+            '/regions/%s' % region_id,
+            body={'region': ref},
+            expected_status=201)
+        self.assertValidRegionResponse(r, ref)
+        # Double-check that the region ID was kept as-is and not
+        # populated with a UUID, as is the case with POST /v3/regions
+        self.assertEqual(region_id, r.json['region']['id'])
+
+    def test_create_region_with_matching_ids(self):
+        """Call ``PUT /regions/{region_id}`` with an ID in the request body."""
+        ref = self.new_region_ref()
+        region_id = ref['id']
+        r = self.put(
+            '/regions/%s' % region_id,
+            body={'region': ref},
+            expected_status=201)
+        self.assertValidRegionResponse(r, ref)
+        # Double-check that the region ID was kept as-is and not
+        # populated with a UUID, as is the case with POST /v3/regions
+        self.assertEqual(region_id, r.json['region']['id'])
+
+    def test_create_region_with_duplicate_id(self):
+        """Call ``PUT /regions/{region_id}``."""
+        ref = dict(description="my region")
+        self.put(
+            '/regions/myregion',
+            body={'region': ref}, expected_status=201)
+        # Create region again with duplicate id
+        self.put(
+            '/regions/myregion',
+            body={'region': ref}, expected_status=409)
+
+    def test_create_region(self):
+        """Call ``POST /regions`` with an ID in the request body."""
+        # the ref will have an ID defined on it
+        ref = self.new_region_ref()
+        r = self.post(
+            '/regions',
+            body={'region': ref})
+        self.assertValidRegionResponse(r, ref)
+
+        # we should be able to get the region, having defined the ID ourselves
+        r = self.get(
+            '/regions/%(region_id)s' % {
+                'region_id': ref['id']})
+        self.assertValidRegionResponse(r, ref)
+
+    def test_create_region_with_empty_id(self):
+        """Call ``POST /regions`` with an empty ID in the request body."""
+        ref = self.new_region_ref()
+        ref['id'] = ''
+
+        r = self.post(
+            '/regions',
+            body={'region': ref}, expected_status=201)
+        self.assertValidRegionResponse(r, ref)
+        self.assertNotEmpty(r.result['region'].get('id'))
+
+    def test_create_region_without_id(self):
+        """Call ``POST /regions`` without an ID in the request body."""
+        ref = self.new_region_ref()
+
+        # instead of defining the ID ourselves...
+        del ref['id']
+
+        # let the service define the ID
+        r = self.post(
+            '/regions',
+            body={'region': ref},
+            expected_status=201)
+        self.assertValidRegionResponse(r, ref)
+
+    def test_create_region_without_description(self):
+        """Call ``POST /regions`` without description in the request body."""
+        ref = self.new_region_ref()
+
+        del ref['description']
+
+        r = self.post(
+            '/regions',
+            body={'region': ref},
+            expected_status=201)
+        # Create the description in the reference to compare to since the
+        # response should now have a description, even though we didn't send
+        # it with the original reference.
+        ref['description'] = ''
+        self.assertValidRegionResponse(r, ref)
+
+    def test_create_regions_with_same_description_string(self):
+        """Call ``POST /regions`` with same description in the request bodies.
+        """
+        # NOTE(lbragstad): Make sure we can create two regions that have the
+        # same description.
+        ref1 = self.new_region_ref()
+        ref2 = self.new_region_ref()
+
+        region_desc = 'Some Region Description'
+
+        ref1['description'] = region_desc
+        ref2['description'] = region_desc
+
+        resp1 = self.post(
+            '/regions',
+            body={'region': ref1},
+            expected_status=201)
+        self.assertValidRegionResponse(resp1, ref1)
+
+        resp2 = self.post(
+            '/regions',
+            body={'region': ref2},
+            expected_status=201)
+        self.assertValidRegionResponse(resp2, ref2)
+
+    def test_create_regions_without_descriptions(self):
+        """Call ``POST /regions`` with no description in the request bodies.
+        """
+        # NOTE(lbragstad): Make sure we can create two regions that have
+        # no description in the request body. The description should be
+        # populated by the catalog Manager.
+        ref1 = self.new_region_ref()
+        ref2 = self.new_region_ref()
+
+        del ref1['description']
+        del ref2['description']
+
+        resp1 = self.post(
+            '/regions',
+            body={'region': ref1},
+            expected_status=201)
+
+        resp2 = self.post(
+            '/regions',
+            body={'region': ref2},
+            expected_status=201)
+        # Create the descriptions in the references to compare to since the
+        # responses should now have descriptions, even though we didn't send
+        # a description with the original references.
+        ref1['description'] = ''
+        ref2['description'] = ''
+        self.assertValidRegionResponse(resp1, ref1)
+        self.assertValidRegionResponse(resp2, ref2)
+
+    def test_create_region_with_conflicting_ids(self):
+        """Call ``PUT /regions/{region_id}`` with conflicting region IDs."""
+        # the region ref is created with an ID
+        ref = self.new_region_ref()
+
+        # but instead of using that ID, make up a new, conflicting one
+        self.put(
+            '/regions/%s' % uuid.uuid4().hex,
+            body={'region': ref},
+            expected_status=400)
+
+    def test_list_regions(self):
+        """Call ``GET /regions``."""
+        r = self.get('/regions')
+        self.assertValidRegionListResponse(r, ref=self.region)
+
+    def _create_region_with_parent_id(self, parent_id=None):
+        ref = self.new_region_ref()
+        ref['parent_region_id'] = parent_id
+        return self.post(
+            '/regions',
+            body={'region': ref})
+
+    def test_list_regions_filtered_by_parent_region_id(self):
+        """Call ``GET /regions?parent_region_id={parent_region_id}``."""
+        new_region = self._create_region_with_parent_id()
+        parent_id = new_region.result['region']['id']
+
+        new_region = self._create_region_with_parent_id(parent_id)
+        new_region = self._create_region_with_parent_id(parent_id)
+
+        r = self.get('/regions?parent_region_id=%s' % parent_id)
+
+        for region in r.result['regions']:
+            self.assertEqual(parent_id, region['parent_region_id'])
+
+    def test_get_region(self):
+        """Call ``GET /regions/{region_id}``."""
+        r = self.get('/regions/%(region_id)s' % {
+            'region_id': self.region_id})
+        self.assertValidRegionResponse(r, self.region)
+
+    def test_update_region(self):
+        """Call ``PATCH /regions/{region_id}``."""
+        region = self.new_region_ref()
+        del region['id']
+        r = self.patch('/regions/%(region_id)s' % {
+            'region_id': self.region_id},
+            body={'region': region})
+        self.assertValidRegionResponse(r, region)
+
+    def test_delete_region(self):
+        """Call ``DELETE /regions/{region_id}``."""
+
+        ref = self.new_region_ref()
+        r = self.post(
+            '/regions',
+            body={'region': ref})
+        self.assertValidRegionResponse(r, ref)
+
+        self.delete('/regions/%(region_id)s' % {
+            'region_id': ref['id']})
+
+    # service crud tests
+
+    def test_create_service(self):
+        """Call ``POST /services``."""
+        ref = self.new_service_ref()
+        r = self.post(
+            '/services',
+            body={'service': ref})
+        self.assertValidServiceResponse(r, ref)
+
+    def test_create_service_no_name(self):
+        """Call ``POST /services``."""
+        ref = self.new_service_ref()
+        del ref['name']
+        r = self.post(
+            '/services',
+            body={'service': ref})
+        ref['name'] = ''
+        self.assertValidServiceResponse(r, ref)
+
+    def test_create_service_no_enabled(self):
+        """Call ``POST /services``."""
+        ref = self.new_service_ref()
+        del ref['enabled']
+        r = self.post(
+            '/services',
+            body={'service': ref})
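+        # the service defaults 'enabled' to True, so add it back to the
+        # reference before comparing against the response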
+        ref['enabled'] = True
+        self.assertValidServiceResponse(r, ref)
+        self.assertIs(True, r.result['service']['enabled'])
+
+    def test_create_service_enabled_false(self):
+        """Call ``POST /services``."""
+        ref = self.new_service_ref()
+        ref['enabled'] = False
+        r = self.post(
+            '/services',
+            body={'service': ref})
+        self.assertValidServiceResponse(r, ref)
+        self.assertIs(False, r.result['service']['enabled'])
+
+    def test_create_service_enabled_true(self):
+        """Call ``POST /services``."""
+        ref = self.new_service_ref()
+        ref['enabled'] = True
+        r = self.post(
+            '/services',
+            body={'service': ref})
+        self.assertValidServiceResponse(r, ref)
+        self.assertIs(True, r.result['service']['enabled'])
+
+    def test_create_service_enabled_str_true(self):
+        """Call ``POST /services``."""
+        ref = self.new_service_ref()
+        ref['enabled'] = 'True'
+        self.post('/services', body={'service': ref}, expected_status=400)
+
+    def test_create_service_enabled_str_false(self):
+        """Call ``POST /services``."""
+        ref = self.new_service_ref()
+        ref['enabled'] = 'False'
+        self.post('/services', body={'service': ref}, expected_status=400)
+
+    def test_create_service_enabled_str_random(self):
+        """Call ``POST /services``."""
+        ref = self.new_service_ref()
+        ref['enabled'] = 'puppies'
+        self.post('/services', body={'service': ref}, expected_status=400)
+
+    def test_list_services(self):
+        """Call ``GET /services``."""
+        r = self.get('/services')
+        self.assertValidServiceListResponse(r, ref=self.service)
+
+    def _create_random_service(self):
+        ref = self.new_service_ref()
+        ref['enabled'] = True
+        response = self.post(
+            '/services',
+            body={'service': ref})
+        return response.json['service']
+
+    def test_filter_list_services_by_type(self):
+        """Call ``GET /services?type=<some type>``."""
+        target_ref = self._create_random_service()
+
+        # create unrelated services
+        self._create_random_service()
+        self._create_random_service()
+
+        response = self.get('/services?type=' + target_ref['type'])
+        self.assertValidServiceListResponse(response, ref=target_ref)
+
+        filtered_service_list = response.json['services']
+        self.assertEqual(1, len(filtered_service_list))
+
+        filtered_service = filtered_service_list[0]
+        self.assertEqual(target_ref['type'], filtered_service['type'])
+
+    def test_filter_list_services_by_name(self):
+        """Call ``GET /services?name=<some name>``."""
+        target_ref = self._create_random_service()
+
+        # create unrelated services
+        self._create_random_service()
+        self._create_random_service()
+
+        response = self.get('/services?name=' + target_ref['name'])
+        self.assertValidServiceListResponse(response, ref=target_ref)
+
+        filtered_service_list = response.json['services']
+        self.assertEqual(1, len(filtered_service_list))
+
+        filtered_service = filtered_service_list[0]
+        self.assertEqual(target_ref['name'], filtered_service['name'])
+
+    def test_get_service(self):
+        """Call ``GET /services/{service_id}``."""
+        r = self.get('/services/%(service_id)s' % {
+            'service_id': self.service_id})
+        self.assertValidServiceResponse(r, self.service)
+
+    def test_update_service(self):
+        """Call ``PATCH /services/{service_id}``."""
+        service = self.new_service_ref()
+        del service['id']
+        r = self.patch('/services/%(service_id)s' % {
+            'service_id': self.service_id},
+            body={'service': service})
+        self.assertValidServiceResponse(r, service)
+
+    def test_delete_service(self):
+        """Call ``DELETE /services/{service_id}``."""
+        self.delete('/services/%(service_id)s' % {
+            'service_id': self.service_id})
+
+    # endpoint crud tests
+
+    def test_list_endpoints(self):
+        """Call ``GET /endpoints``."""
+        r = self.get('/endpoints')
+        self.assertValidEndpointListResponse(r, ref=self.endpoint)
+
+    def test_create_endpoint_no_enabled(self):
+        """Call ``POST /endpoints``."""
+        ref = self.new_endpoint_ref(service_id=self.service_id)
+        r = self.post(
+            '/endpoints',
+            body={'endpoint': ref})
+        ref['enabled'] = True
+        self.assertValidEndpointResponse(r, ref)
+
+    def test_create_endpoint_enabled_true(self):
+        """Call ``POST /endpoints`` with enabled: true."""
+        ref = self.new_endpoint_ref(service_id=self.service_id,
+                                    enabled=True)
+        r = self.post(
+            '/endpoints',
+            body={'endpoint': ref})
+        self.assertValidEndpointResponse(r, ref)
+
+    def test_create_endpoint_enabled_false(self):
+        """Call ``POST /endpoints`` with enabled: false."""
+        ref = self.new_endpoint_ref(service_id=self.service_id,
+                                    enabled=False)
+        r = self.post(
+            '/endpoints',
+            body={'endpoint': ref})
+        self.assertValidEndpointResponse(r, ref)
+
+    def test_create_endpoint_enabled_str_true(self):
+        """Call ``POST /endpoints`` with enabled: 'True'."""
+        ref = self.new_endpoint_ref(service_id=self.service_id,
+                                    enabled='True')
+        self.post(
+            '/endpoints',
+            body={'endpoint': ref},
+            expected_status=400)
+
+    def test_create_endpoint_enabled_str_false(self):
+        """Call ``POST /endpoints`` with enabled: 'False'."""
+        ref = self.new_endpoint_ref(service_id=self.service_id,
+                                    enabled='False')
+        self.post(
+            '/endpoints',
+            body={'endpoint': ref},
+            expected_status=400)
+
+    def test_create_endpoint_enabled_str_random(self):
+        """Call ``POST /endpoints`` with enabled: 'puppies'."""
+        ref = self.new_endpoint_ref(service_id=self.service_id,
+                                    enabled='puppies')
+        self.post(
+            '/endpoints',
+            body={'endpoint': ref},
+            expected_status=400)
+
+    def test_create_endpoint_with_invalid_region_id(self):
+        """Call ``POST /endpoints``."""
+        ref = self.new_endpoint_ref(service_id=self.service_id)
+        ref["region_id"] = uuid.uuid4().hex
+        self.post('/endpoints', body={'endpoint': ref}, expected_status=400)
+
+    def test_create_endpoint_with_region(self):
+        """EndpointV3 creates the region before creating the endpoint, if
+        endpoint is provided with 'region' and no 'region_id'
+        """
+        ref = self.new_endpoint_ref(service_id=self.service_id)
+        ref["region"] = uuid.uuid4().hex
+        ref.pop('region_id')
+        self.post('/endpoints', body={'endpoint': ref}, expected_status=201)
+        # Make sure the region is created
+        self.get('/regions/%(region_id)s' % {
+            'region_id': ref["region"]})
+
+    def test_create_endpoint_with_no_region(self):
+        """EndpointV3 allows to creates the endpoint without region."""
+        ref = self.new_endpoint_ref(service_id=self.service_id)
+        ref.pop('region_id')
+        self.post('/endpoints', body={'endpoint': ref}, expected_status=201)
+
+    def test_create_endpoint_with_empty_url(self):
+        """Call ``POST /endpoints``."""
+        ref = self.new_endpoint_ref(service_id=self.service_id)
+        ref["url"] = ''
+        self.post('/endpoints', body={'endpoint': ref}, expected_status=400)
+
+    def test_get_endpoint(self):
+        """Call ``GET /endpoints/{endpoint_id}``."""
+        r = self.get(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id})
+        self.assertValidEndpointResponse(r, self.endpoint)
+
+    def test_update_endpoint(self):
+        """Call ``PATCH /endpoints/{endpoint_id}``."""
+        ref = self.new_endpoint_ref(service_id=self.service_id)
+        del ref['id']
+        r = self.patch(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id},
+            body={'endpoint': ref})
+        ref['enabled'] = True
+        self.assertValidEndpointResponse(r, ref)
+
+    def test_update_endpoint_enabled_true(self):
+        """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: True."""
+        r = self.patch(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id},
+            body={'endpoint': {'enabled': True}})
+        self.assertValidEndpointResponse(r, self.endpoint)
+
+    def test_update_endpoint_enabled_false(self):
+        """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: False."""
+        r = self.patch(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id},
+            body={'endpoint': {'enabled': False}})
+        exp_endpoint = copy.copy(self.endpoint)
+        exp_endpoint['enabled'] = False
+        self.assertValidEndpointResponse(r, exp_endpoint)
+
+    def test_update_endpoint_enabled_str_true(self):
+        """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'True'."""
+        self.patch(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id},
+            body={'endpoint': {'enabled': 'True'}},
+            expected_status=400)
+
+    def test_update_endpoint_enabled_str_false(self):
+        """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'False'."""
+        self.patch(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id},
+            body={'endpoint': {'enabled': 'False'}},
+            expected_status=400)
+
+    def test_update_endpoint_enabled_str_random(self):
+        """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'kitties'."""
+        self.patch(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id},
+            body={'endpoint': {'enabled': 'kitties'}},
+            expected_status=400)
+
+    def test_delete_endpoint(self):
+        """Call ``DELETE /endpoints/{endpoint_id}``."""
+        self.delete(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id})
+
+    def test_create_endpoint_on_v2(self):
+        # clear the v3 endpoint so we only have endpoints created on v2
+        self.delete(
+            '/endpoints/%(endpoint_id)s' % {
+                'endpoint_id': self.endpoint_id})
+
+        # create a v3 endpoint ref, and then tweak it back to a v2-style ref
+        ref = self.new_endpoint_ref(service_id=self.service['id'])
+        del ref['id']
+        del ref['interface']
+        ref['publicurl'] = ref.pop('url')
+        ref['internalurl'] = None
+        ref['region'] = ref['region_id']
+        del ref['region_id']
+        # don't set adminurl to ensure its absence is handled like internalurl
+
+        # create the endpoint on v2 (using a v3 token)
+        r = self.admin_request(
+            method='POST',
+            path='/v2.0/endpoints',
+            token=self.get_scoped_token(),
+            body={'endpoint': ref})
+        endpoint_v2 = r.result['endpoint']
+
+        # test the endpoint on v3
+        r = self.get('/endpoints')
+        endpoints = self.assertValidEndpointListResponse(r)
+        self.assertEqual(1, len(endpoints))
+        endpoint_v3 = endpoints.pop()
+
+        # these attributes are identical between both APIs
+        self.assertEqual(ref['region'], endpoint_v3['region_id'])
+        self.assertEqual(ref['service_id'], endpoint_v3['service_id'])
+        self.assertEqual(ref['description'], endpoint_v3['description'])
+
+        # a v2 endpoint is not quite the same concept as a v3 endpoint, so they
+        # receive different identifiers
+        self.assertNotEqual(endpoint_v2['id'], endpoint_v3['id'])
+
+        # v2 has a publicurl; v3 has a url + interface type
+        self.assertEqual(ref['publicurl'], endpoint_v3['url'])
+        self.assertEqual('public', endpoint_v3['interface'])
+
+        # tests for bug 1152632 -- these attributes were being returned by v3
+        self.assertNotIn('publicurl', endpoint_v3)
+        self.assertNotIn('adminurl', endpoint_v3)
+        self.assertNotIn('internalurl', endpoint_v3)
+
+        # test for bug 1152635 -- this attribute was being returned by v3
+        self.assertNotIn('legacy_endpoint_id', endpoint_v3)
+
+        self.assertEqual(endpoint_v2['region'], endpoint_v3['region_id'])
+
+
+class TestCatalogAPISQL(tests.TestCase):
+    """Tests for the catalog Manager against the SQL backend.
+
+    """
+
+    def setUp(self):
+        super(TestCatalogAPISQL, self).setUp()
+        self.useFixture(database.Database())
+        self.catalog_api = catalog.Manager()
+
+        self.service_id = uuid.uuid4().hex
+        service = {'id': self.service_id, 'name': uuid.uuid4().hex}
+        self.catalog_api.create_service(self.service_id, service)
+
+        endpoint = self.new_endpoint_ref(service_id=self.service_id)
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+    def config_overrides(self):
+        super(TestCatalogAPISQL, self).config_overrides()
+        self.config_fixture.config(
+            group='catalog',
+            driver='keystone.catalog.backends.sql.Catalog')
+
+    def new_endpoint_ref(self, service_id):
+        return {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'interface': uuid.uuid4().hex[:8],
+            'service_id': service_id,
+            'url': uuid.uuid4().hex,
+            'region': uuid.uuid4().hex,
+        }
+
+    def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
+        user_id = uuid.uuid4().hex
+        tenant_id = uuid.uuid4().hex
+
+        # the only endpoint in the catalog is the one created in setUp
+        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
+        self.assertEqual(1, len(catalog[0]['endpoints']))
+        # it's also the only endpoint in the backend
+        self.assertEqual(1, len(self.catalog_api.list_endpoints()))
+
+        # create a new, invalid endpoint - the URL's format string has a
+        # malformed type declaration (no trailing 's')
+        ref = self.new_endpoint_ref(self.service_id)
+        ref['url'] = 'http://keystone/%(tenant_id)'
+        self.catalog_api.create_endpoint(ref['id'], ref)
+
+        # create a new, invalid endpoint - nonexistent key
+        ref = self.new_endpoint_ref(self.service_id)
+        ref['url'] = 'http://keystone/%(you_wont_find_me)s'
+        self.catalog_api.create_endpoint(ref['id'], ref)
+
+        # verify that the invalid endpoints don't appear in the catalog
+        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
+        self.assertEqual(1, len(catalog[0]['endpoints']))
+        # all three appear in the backend
+        self.assertEqual(3, len(self.catalog_api.list_endpoints()))
+
+    def test_get_catalog_always_returns_service_name(self):
+        user_id = uuid.uuid4().hex
+        tenant_id = uuid.uuid4().hex
+
+        # create a service, with a name
+        named_svc = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+        }
+        self.catalog_api.create_service(named_svc['id'], named_svc)
+        endpoint = self.new_endpoint_ref(service_id=named_svc['id'])
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        # create a service, with no name
+        unnamed_svc = {
+            'id': uuid.uuid4().hex,
+            'type': uuid.uuid4().hex
+        }
+        self.catalog_api.create_service(unnamed_svc['id'], unnamed_svc)
+        endpoint = self.new_endpoint_ref(service_id=unnamed_svc['id'])
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
+
+        named_endpoint = [ep for ep in catalog
+                          if ep['type'] == named_svc['type']][0]
+        self.assertEqual(named_svc['name'], named_endpoint['name'])
+
+        unnamed_endpoint = [ep for ep in catalog
+                            if ep['type'] == unnamed_svc['type']][0]
+        self.assertEqual('', unnamed_endpoint['name'])
+
+
+# TODO(dstanek): this needs refactoring with the test above, but we are in a
+# crunch so that will happen in a future patch.
+class TestCatalogAPISQLRegions(tests.TestCase):
+    """Tests for the catalog Manager against the SQL backend.
+
+    """
+
+    def setUp(self):
+        super(TestCatalogAPISQLRegions, self).setUp()
+        self.useFixture(database.Database())
+        self.catalog_api = catalog.Manager()
+
+    def config_overrides(self):
+        super(TestCatalogAPISQLRegions, self).config_overrides()
+        self.config_fixture.config(
+            group='catalog',
+            driver='keystone.catalog.backends.sql.Catalog')
+
+    def new_endpoint_ref(self, service_id):
+        return {
+            'id': uuid.uuid4().hex,
+            'name': uuid.uuid4().hex,
+            'description': uuid.uuid4().hex,
+            'interface': uuid.uuid4().hex[:8],
+            'service_id': service_id,
+            'url': uuid.uuid4().hex,
+            'region_id': uuid.uuid4().hex,
+        }
+
+    def test_get_catalog_returns_proper_endpoints_with_no_region(self):
+        service_id = uuid.uuid4().hex
+        service = {'id': service_id, 'name': uuid.uuid4().hex}
+        self.catalog_api.create_service(service_id, service)
+
+        endpoint = self.new_endpoint_ref(service_id=service_id)
+        del endpoint['region_id']
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        user_id = uuid.uuid4().hex
+        tenant_id = uuid.uuid4().hex
+
+        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
+        self.assertValidCatalogEndpoint(
+            catalog[0]['endpoints'][0], ref=endpoint)
+
+    def test_get_catalog_returns_proper_endpoints_with_region(self):
+        service_id = uuid.uuid4().hex
+        service = {'id': service_id, 'name': uuid.uuid4().hex}
+        self.catalog_api.create_service(service_id, service)
+
+        endpoint = self.new_endpoint_ref(service_id=service_id)
+        self.catalog_api.create_region({'id': endpoint['region_id']})
+        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+        endpoint = self.catalog_api.get_endpoint(endpoint['id'])
+        user_id = uuid.uuid4().hex
+        tenant_id = uuid.uuid4().hex
+
+        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
+        self.assertValidCatalogEndpoint(
+            catalog[0]['endpoints'][0], ref=endpoint)
+
+    def assertValidCatalogEndpoint(self, entity, ref=None):
+        keys = ['description', 'id', 'interface', 'name', 'region_id', 'url']
+        for k in keys:
+            self.assertEqual(ref.get(k), entity[k], k)
+        self.assertEqual(entity['region_id'], entity['region'])
diff --git a/keystone-moon/keystone/tests/unit/test_v3_controller.py b/keystone-moon/keystone/tests/unit/test_v3_controller.py
new file mode 100644 (file)
index 0000000..3ac4ba5
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright 2014 CERN.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import six
+from testtools import matchers
+
+from keystone.common import controller
+from keystone import exception
+from keystone.tests import unit as tests
+
+
+class V3ControllerTestCase(tests.TestCase):
+    """Tests for the V3Controller class."""
+    def setUp(self):
+        super(V3ControllerTestCase, self).setUp()
+
+        class ControllerUnderTest(controller.V3Controller):
+            _mutable_parameters = frozenset(['hello', 'world'])
+
+        self.api = ControllerUnderTest()
+
+    def test_check_immutable_params(self):
+        """Pass valid parameters to the method and expect no failure."""
+        ref = {
+            'hello': uuid.uuid4().hex,
+            'world': uuid.uuid4().hex
+        }
+        self.api.check_immutable_params(ref)
+
+    def test_check_immutable_params_fail(self):
+        """Pass invalid parameter to the method and expect failure."""
+        ref = {uuid.uuid4().hex: uuid.uuid4().hex for _ in range(3)}
+
+        ex = self.assertRaises(exception.ImmutableAttributeError,
+                               self.api.check_immutable_params, ref)
+        ex_msg = six.text_type(ex)
+        self.assertThat(ex_msg, matchers.Contains(self.api.__class__.__name__))
+        for key in ref.keys():
+            self.assertThat(ex_msg, matchers.Contains(key))
diff --git a/keystone-moon/keystone/tests/unit/test_v3_credential.py b/keystone-moon/keystone/tests/unit/test_v3_credential.py
new file mode 100644 (file)
index 0000000..d792b21
--- /dev/null
@@ -0,0 +1,406 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+import json
+import uuid
+
+from keystoneclient.contrib.ec2 import utils as ec2_utils
+from oslo_config import cfg
+
+from keystone import exception
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+
+
+class CredentialBaseTestCase(test_v3.RestfulTestCase):
+    def _create_dict_blob_credential(self):
+        blob = {"access": uuid.uuid4().hex,
+                "secret": uuid.uuid4().hex}
+        credential_id = hashlib.sha256(blob['access']).hexdigest()
+        credential = self.new_credential_ref(
+            user_id=self.user['id'],
+            project_id=self.project_id)
+        credential['id'] = credential_id
+
+        # Store the blob as a dict, *not* JSON (ref bug #1259584). This
+        # means we can test that the dict->json workaround, added as part
+        # of the bugfix for backwards compatibility, works.
+        credential['blob'] = blob
+        credential['type'] = 'ec2'
+        # Create direct via the DB API to avoid validation failure
+        self.credential_api.create_credential(
+            credential_id,
+            credential)
+        expected_blob = json.dumps(blob)
+        return expected_blob, credential_id
+
+
+class CredentialTestCase(CredentialBaseTestCase):
+    """Test credential CRUD."""
+    def setUp(self):
+        super(CredentialTestCase, self).setUp()
+
+        self.credential_id = uuid.uuid4().hex
+        self.credential = self.new_credential_ref(
+            user_id=self.user['id'],
+            project_id=self.project_id)
+        self.credential['id'] = self.credential_id
+        self.credential_api.create_credential(
+            self.credential_id,
+            self.credential)
+
+    def test_credential_api_delete_credentials_for_project(self):
+        self.credential_api.delete_credentials_for_project(self.project_id)
+        # Test that the credential that we created in .setUp no longer exists
+        # once we delete all credentials for self.project_id
+        self.assertRaises(exception.CredentialNotFound,
+                          self.credential_api.get_credential,
+                          credential_id=self.credential_id)
+
+    def test_credential_api_delete_credentials_for_user(self):
+        self.credential_api.delete_credentials_for_user(self.user_id)
+        # Test that the credential that we created in .setUp no longer exists
+        # once we delete all credentials for self.user_id
+        self.assertRaises(exception.CredentialNotFound,
+                          self.credential_api.get_credential,
+                          credential_id=self.credential_id)
+
+    def test_list_credentials(self):
+        """Call ``GET /credentials``."""
+        r = self.get('/credentials')
+        self.assertValidCredentialListResponse(r, ref=self.credential)
+
+    def test_list_credentials_filtered_by_user_id(self):
+        """Call ``GET /credentials?user_id={user_id}``."""
+        credential = self.new_credential_ref(
+            user_id=uuid.uuid4().hex)
+        self.credential_api.create_credential(
+            credential['id'], credential)
+
+        r = self.get('/credentials?user_id=%s' % self.user['id'])
+        self.assertValidCredentialListResponse(r, ref=self.credential)
+        for cred in r.result['credentials']:
+            self.assertEqual(self.user['id'], cred['user_id'])
+
+    def test_create_credential(self):
+        """Call ``POST /credentials``."""
+        ref = self.new_credential_ref(user_id=self.user['id'])
+        r = self.post(
+            '/credentials',
+            body={'credential': ref})
+        self.assertValidCredentialResponse(r, ref)
+
+    def test_get_credential(self):
+        """Call ``GET /credentials/{credential_id}``."""
+        r = self.get(
+            '/credentials/%(credential_id)s' % {
+                'credential_id': self.credential_id})
+        self.assertValidCredentialResponse(r, self.credential)
+
+    def test_update_credential(self):
+        """Call ``PATCH /credentials/{credential_id}``."""
+        ref = self.new_credential_ref(
+            user_id=self.user['id'],
+            project_id=self.project_id)
+        del ref['id']
+        r = self.patch(
+            '/credentials/%(credential_id)s' % {
+                'credential_id': self.credential_id},
+            body={'credential': ref})
+        self.assertValidCredentialResponse(r, ref)
+
+    def test_delete_credential(self):
+        """Call ``DELETE /credentials/{credential_id}``."""
+        self.delete(
+            '/credentials/%(credential_id)s' % {
+                'credential_id': self.credential_id})
+
+    def test_create_ec2_credential(self):
+        """Call ``POST /credentials`` for creating an ec2 credential."""
+        ref = self.new_credential_ref(user_id=self.user['id'],
+                                      project_id=self.project_id)
+        blob = {"access": uuid.uuid4().hex,
+                "secret": uuid.uuid4().hex}
+        ref['blob'] = json.dumps(blob)
+        ref['type'] = 'ec2'
+        r = self.post(
+            '/credentials',
+            body={'credential': ref})
+        self.assertValidCredentialResponse(r, ref)
+        # Assert the credential id is the same as the hash of the
+        # access key id for ec2 credentials.
+        self.assertEqual(r.result['credential']['id'],
+                         hashlib.sha256(blob['access']).hexdigest())
+        # Create second ec2 credential with the same access key id and check
+        # for conflict.
+        self.post(
+            '/credentials',
+            body={'credential': ref}, expected_status=409)
+
+    def test_get_ec2_dict_blob(self):
+        """Ensure non-JSON blob data is correctly converted."""
+        expected_blob, credential_id = self._create_dict_blob_credential()
+
+        r = self.get(
+            '/credentials/%(credential_id)s' % {
+                'credential_id': credential_id})
+        self.assertEqual(expected_blob, r.result['credential']['blob'])
+
+    def test_list_ec2_dict_blob(self):
+        """Ensure non-JSON blob data is correctly converted."""
+        expected_blob, credential_id = self._create_dict_blob_credential()
+
+        list_r = self.get('/credentials')
+        list_creds = list_r.result['credentials']
+        list_ids = [r['id'] for r in list_creds]
+        self.assertIn(credential_id, list_ids)
+        for r in list_creds:
+            if r['id'] == credential_id:
+                self.assertEqual(expected_blob, r['blob'])
+
+    def test_create_non_ec2_credential(self):
+        """Call ``POST /credentials`` for creating a non-ec2 credential."""
+        ref = self.new_credential_ref(user_id=self.user['id'])
+        blob = {"access": uuid.uuid4().hex,
+                "secret": uuid.uuid4().hex}
+        ref['blob'] = json.dumps(blob)
+        r = self.post(
+            '/credentials',
+            body={'credential': ref})
+        self.assertValidCredentialResponse(r, ref)
+        # Assert the credential id is not the hash of the access key id
+        # for non-ec2 credentials.
+        self.assertNotEqual(r.result['credential']['id'],
+                            hashlib.sha256(blob['access']).hexdigest())
+
+    def test_create_ec2_credential_with_missing_project_id(self):
+        """Call ``POST /credentials`` for creating an ec2 credential
+        with a missing project_id.
+        """
+        ref = self.new_credential_ref(user_id=self.user['id'])
+        blob = {"access": uuid.uuid4().hex,
+                "secret": uuid.uuid4().hex}
+        ref['blob'] = json.dumps(blob)
+        ref['type'] = 'ec2'
+        # Assert 400 status for bad request with missing project_id
+        self.post(
+            '/credentials',
+            body={'credential': ref}, expected_status=400)
+
+    def test_create_ec2_credential_with_invalid_blob(self):
+        """Call ``POST /credentials`` for creating an ec2 credential
+        with an invalid blob.
+        """
+        ref = self.new_credential_ref(user_id=self.user['id'],
+                                      project_id=self.project_id)
+        ref['blob'] = '{"abc":"def"d}'
+        ref['type'] = 'ec2'
+        # Assert 400 status for bad request containing invalid
+        # blob
+        response = self.post(
+            '/credentials',
+            body={'credential': ref}, expected_status=400)
+        self.assertValidErrorResponse(response)
+
+    def test_create_credential_with_admin_token(self):
+        # Make sure we can create credential with the static admin token
+        ref = self.new_credential_ref(user_id=self.user['id'])
+        r = self.post(
+            '/credentials',
+            body={'credential': ref},
+            token=CONF.admin_token)
+        self.assertValidCredentialResponse(r, ref)
+
+
+class TestCredentialTrustScoped(test_v3.RestfulTestCase):
+    """Test credential with a trust-scoped token."""
+    def setUp(self):
+        super(TestCredentialTrustScoped, self).setUp()
+
+        self.trustee_user = self.new_user_ref(domain_id=self.domain_id)
+        password = self.trustee_user['password']
+        self.trustee_user = self.identity_api.create_user(self.trustee_user)
+        self.trustee_user['password'] = password
+        self.trustee_user_id = self.trustee_user['id']
+
+    def config_overrides(self):
+        super(TestCredentialTrustScoped, self).config_overrides()
+        self.config_fixture.config(group='trust', enabled=True)
+
+    def test_trust_scoped_ec2_credential(self):
+        """Call ``POST /credentials`` for creating an ec2 credential."""
+        # Create the trust
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.trustee_user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+        del ref['id']
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        # Get a trust scoped token
+        auth_data = self.build_authentication_request(
+            user_id=self.trustee_user['id'],
+            password=self.trustee_user['password'],
+            trust_id=trust['id'])
+        r = self.v3_authenticate_token(auth_data)
+        self.assertValidProjectTrustScopedTokenResponse(r, self.user)
+        trust_id = r.result['token']['OS-TRUST:trust']['id']
+        token_id = r.headers.get('X-Subject-Token')
+
+        # Create the credential with the trust scoped token
+        ref = self.new_credential_ref(user_id=self.user['id'],
+                                      project_id=self.project_id)
+        blob = {"access": uuid.uuid4().hex,
+                "secret": uuid.uuid4().hex}
+        ref['blob'] = json.dumps(blob)
+        ref['type'] = 'ec2'
+        r = self.post(
+            '/credentials',
+            body={'credential': ref},
+            token=token_id)
+
+        # We expect the response blob to contain the trust_id
+        ret_ref = ref.copy()
+        ret_blob = blob.copy()
+        ret_blob['trust_id'] = trust_id
+        ret_ref['blob'] = json.dumps(ret_blob)
+        self.assertValidCredentialResponse(r, ref=ret_ref)
+
+        # Assert the credential id is the same as the hash of the
+        # access key id for ec2 credentials.
+        self.assertEqual(r.result['credential']['id'],
+                         hashlib.sha256(blob['access']).hexdigest())
+
+        # Create second ec2 credential with the same access key id and check
+        # for conflict.
+        self.post(
+            '/credentials',
+            body={'credential': ref},
+            token=token_id,
+            expected_status=409)
+
+
+class TestCredentialEc2(CredentialBaseTestCase):
+    """Test v3 credential compatibility with ec2tokens."""
+    def setUp(self):
+        super(TestCredentialEc2, self).setUp()
+
+    def _validate_signature(self, access, secret):
+        """Test signature validation with the access/secret provided."""
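+        # Build and sign a dummy request locally using the secret.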
+        signer = ec2_utils.Ec2Signer(secret)
+        params = {'SignatureMethod': 'HmacSHA256',
+                  'SignatureVersion': '2',
+                  'AWSAccessKeyId': access}
+        request = {'host': 'foo',
+                   'verb': 'GET',
+                   'path': '/bar',
+                   'params': params}
+        signature = signer.generate(request)
+
+        # Now make a request to validate the signed dummy request via the
+        # ec2tokens API.  This proves the v3 ec2 credentials actually work.
+        sig_ref = {'access': access,
+                   'signature': signature,
+                   'host': 'foo',
+                   'verb': 'GET',
+                   'path': '/bar',
+                   'params': params}
+        r = self.post(
+            '/ec2tokens',
+            body={'ec2Credentials': sig_ref},
+            expected_status=200)
+        self.assertValidTokenResponse(r)
+
+    def test_ec2_credential_signature_validate(self):
+        """Test signature validation with a v3 ec2 credential."""
+        ref = self.new_credential_ref(
+            user_id=self.user['id'],
+            project_id=self.project_id)
+        blob = {"access": uuid.uuid4().hex,
+                "secret": uuid.uuid4().hex}
+        ref['blob'] = json.dumps(blob)
+        ref['type'] = 'ec2'
+        r = self.post(
+            '/credentials',
+            body={'credential': ref})
+        self.assertValidCredentialResponse(r, ref)
+        # Assert the credential id is the same as the hash of the
+        # access key id.
+        self.assertEqual(r.result['credential']['id'],
+                         hashlib.sha256(blob['access']).hexdigest())
+
+        cred_blob = json.loads(r.result['credential']['blob'])
+        self.assertEqual(blob, cred_blob)
+        self._validate_signature(access=cred_blob['access'],
+                                 secret=cred_blob['secret'])
+
+    def test_ec2_credential_signature_validate_legacy(self):
+        """Test signature validation with a legacy v3 ec2 credential."""
+        cred_json, credential_id = self._create_dict_blob_credential()
+        cred_blob = json.loads(cred_json)
+        self._validate_signature(access=cred_blob['access'],
+                                 secret=cred_blob['secret'])
+
+    def _get_ec2_cred_uri(self):
+        return '/users/%s/credentials/OS-EC2' % self.user_id
+
+    def _get_ec2_cred(self):
+        uri = self._get_ec2_cred_uri()
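+        # Note: the OS-EC2 API still uses the v2-era 'tenant_id' key in
+        # its request body rather than 'project_id'.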
+        r = self.post(uri, body={'tenant_id': self.project_id})
+        return r.result['credential']
+
+    def test_ec2_create_credential(self):
+        """Test ec2 credential creation."""
+        ec2_cred = self._get_ec2_cred()
+        self.assertEqual(self.user_id, ec2_cred['user_id'])
+        self.assertEqual(self.project_id, ec2_cred['tenant_id'])
+        self.assertIsNone(ec2_cred['trust_id'])
+        self._validate_signature(access=ec2_cred['access'],
+                                 secret=ec2_cred['secret'])
+
+        return ec2_cred
+
+    def test_ec2_get_credential(self):
+        ec2_cred = self._get_ec2_cred()
+        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
+        r = self.get(uri)
+        self.assertDictEqual(ec2_cred, r.result['credential'])
+
+    def test_ec2_list_credentials(self):
+        """Test ec2 credential listing."""
+        self._get_ec2_cred()
+        uri = self._get_ec2_cred_uri()
+        r = self.get(uri)
+        cred_list = r.result['credentials']
+        self.assertEqual(1, len(cred_list))
+
+    def test_ec2_delete_credential(self):
+        """Test ec2 credential deletion."""
+        ec2_cred = self._get_ec2_cred()
+        uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
+        cred_from_credential_api = (
+            self.credential_api
+            .list_credentials_for_user(self.user_id))
+        self.assertEqual(1, len(cred_from_credential_api))
+        self.delete(uri)
+        self.assertRaises(exception.CredentialNotFound,
+                          self.credential_api.get_credential,
+                          cred_from_credential_api[0]['id'])
diff --git a/keystone-moon/keystone/tests/unit/test_v3_domain_config.py b/keystone-moon/keystone/tests/unit/test_v3_domain_config.py
new file mode 100644 (file)
index 0000000..6f96f0e
--- /dev/null
@@ -0,0 +1,210 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from oslo_config import cfg
+
+from keystone import exception
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+
+
+class DomainConfigTestCase(test_v3.RestfulTestCase):
+    """Test domain config support."""
+
+    def setUp(self):
+        super(DomainConfigTestCase, self).setUp()
+
+        self.domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.resource_api.create_domain(self.domain['id'], self.domain)
+        self.config = {'ldap': {'url': uuid.uuid4().hex,
+                                'user_tree_dn': uuid.uuid4().hex},
+                       'identity': {'driver': uuid.uuid4().hex}}
+
+    def test_create_config(self):
+        """Call ``PUT /domains/{domain_id}/config``."""
+        url = '/domains/%(domain_id)s/config' % {
+            'domain_id': self.domain['id']}
+        r = self.put(url, body={'config': self.config},
+                     expected_status=201)
+        res = self.domain_config_api.get_config(self.domain['id'])
+        self.assertEqual(self.config, r.result['config'])
+        self.assertEqual(self.config, res)
+
+    def test_create_config_twice(self):
+        """Check that multiple creates don't raise an error."""
+        self.put('/domains/%(domain_id)s/config' % {
+            'domain_id': self.domain['id']},
+            body={'config': self.config},
+            expected_status=201)
+        self.put('/domains/%(domain_id)s/config' % {
+            'domain_id': self.domain['id']},
+            body={'config': self.config},
+            expected_status=200)
+
+    def test_delete_config(self):
+        """Call ``DELETE /domains/{domain_id}/config``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        self.delete('/domains/%(domain_id)s/config' % {
+            'domain_id': self.domain['id']})
+        self.get('/domains/%(domain_id)s/config' % {
+            'domain_id': self.domain['id']},
+            expected_status=exception.DomainConfigNotFound.code)
+
+    def test_delete_config_by_group(self):
+        """Call ``DELETE /domains/{domain_id}/config/{group}``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        self.delete('/domains/%(domain_id)s/config/ldap' % {
+            'domain_id': self.domain['id']})
+        res = self.domain_config_api.get_config(self.domain['id'])
+        self.assertNotIn('ldap', res)
+
+    def test_get_head_config(self):
+        """Call ``GET & HEAD /domains/{domain_id}/config``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        url = '/domains/%(domain_id)s/config' % {
+            'domain_id': self.domain['id']}
+        r = self.get(url)
+        self.assertEqual(self.config, r.result['config'])
+        self.head(url, expected_status=200)
+
+    def test_get_config_by_group(self):
+        """Call ``GET & HEAD /domains/{domain_id}/config/{group}``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        url = '/domains/%(domain_id)s/config/ldap' % {
+            'domain_id': self.domain['id']}
+        r = self.get(url)
+        self.assertEqual({'ldap': self.config['ldap']}, r.result['config'])
+        self.head(url, expected_status=200)
+
+    def test_get_config_by_option(self):
+        """Call ``GET & HEAD /domains/{domain_id}/config/{group}/{option}``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        url = '/domains/%(domain_id)s/config/ldap/url' % {
+            'domain_id': self.domain['id']}
+        r = self.get(url)
+        self.assertEqual({'url': self.config['ldap']['url']},
+                         r.result['config'])
+        self.head(url, expected_status=200)
+
+    def test_get_non_existent_config(self):
+        """Call ``GET /domains/{domain_id}/config`` with no config set."""
+        self.get('/domains/%(domain_id)s/config' % {
+            'domain_id': self.domain['id']}, expected_status=404)
+
+    def test_get_non_existent_config_group(self):
+        """Call ``GET /domains/{domain_id}/config/{group_not_exist}``."""
+        config = {'ldap': {'url': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+        self.get('/domains/%(domain_id)s/config/identity' % {
+            'domain_id': self.domain['id']}, expected_status=404)
+
+    def test_get_non_existent_config_option(self):
+        """Call ``GET /domains/{domain_id}/config/{group}/{option}``
+        for an option that does not exist.
+        """
+        config = {'ldap': {'url': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+        self.get('/domains/%(domain_id)s/config/ldap/user_tree_dn' % {
+            'domain_id': self.domain['id']}, expected_status=404)
+
+    def test_update_config(self):
+        """Call ``PATCH /domains/{domain_id}/config``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        new_config = {'ldap': {'url': uuid.uuid4().hex},
+                      'identity': {'driver': uuid.uuid4().hex}}
+        r = self.patch('/domains/%(domain_id)s/config' % {
+            'domain_id': self.domain['id']},
+            body={'config': new_config})
+        res = self.domain_config_api.get_config(self.domain['id'])
+        expected_config = copy.deepcopy(self.config)
+        expected_config['ldap']['url'] = new_config['ldap']['url']
+        expected_config['identity']['driver'] = (
+            new_config['identity']['driver'])
+        self.assertEqual(expected_config, r.result['config'])
+        self.assertEqual(expected_config, res)
+
+    def test_update_config_group(self):
+        """Call ``PATCH /domains/{domain_id}/config/{group}``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        new_config = {'ldap': {'url': uuid.uuid4().hex,
+                               'user_filter': uuid.uuid4().hex}}
+        r = self.patch('/domains/%(domain_id)s/config/ldap' % {
+            'domain_id': self.domain['id']},
+            body={'config': new_config})
+        res = self.domain_config_api.get_config(self.domain['id'])
+        expected_config = copy.deepcopy(self.config)
+        expected_config['ldap']['url'] = new_config['ldap']['url']
+        expected_config['ldap']['user_filter'] = (
+            new_config['ldap']['user_filter'])
+        self.assertEqual(expected_config, r.result['config'])
+        self.assertEqual(expected_config, res)
+
+    def test_update_config_invalid_group(self):
+        """Call ``PATCH /domains/{domain_id}/config/{invalid_group}``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+
+        # Trying to update a group that is neither whitelisted nor
+        # sensitive should result in Forbidden.
+        invalid_group = uuid.uuid4().hex
+        new_config = {invalid_group: {'url': uuid.uuid4().hex,
+                                      'user_filter': uuid.uuid4().hex}}
+        self.patch('/domains/%(domain_id)s/config/%(invalid_group)s' % {
+            'domain_id': self.domain['id'], 'invalid_group': invalid_group},
+            body={'config': new_config},
+            expected_status=403)
+        # Trying to update a valid group, but one that is not in the
+        # current config, should result in NotFound.
+        config = {'ldap': {'suffix': uuid.uuid4().hex}}
+        self.domain_config_api.create_config(self.domain['id'], config)
+        new_config = {'identity': {'driver': uuid.uuid4().hex}}
+        self.patch('/domains/%(domain_id)s/config/identity' % {
+            'domain_id': self.domain['id']},
+            body={'config': new_config},
+            expected_status=404)
+
+    def test_update_config_option(self):
+        """Call ``PATCH /domains/{domain_id}/config/{group}/{option}``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        new_config = {'url': uuid.uuid4().hex}
+        r = self.patch('/domains/%(domain_id)s/config/ldap/url' % {
+            'domain_id': self.domain['id']},
+            body={'config': new_config})
+        res = self.domain_config_api.get_config(self.domain['id'])
+        expected_config = copy.deepcopy(self.config)
+        expected_config['ldap']['url'] = new_config['url']
+        self.assertEqual(expected_config, r.result['config'])
+        self.assertEqual(expected_config, res)
+
+    def test_update_config_invalid_option(self):
+        """Call ``PATCH /domains/{domain_id}/config/{group}/{invalid}``."""
+        self.domain_config_api.create_config(self.domain['id'], self.config)
+        invalid_option = uuid.uuid4().hex
+        new_config = {'ldap': {invalid_option: uuid.uuid4().hex}}
+        # Trying to update an option that is neither whitelisted nor
+        # sensitive should result in Forbidden.
+        self.patch(
+            '/domains/%(domain_id)s/config/ldap/%(invalid_option)s' % {
+                'domain_id': self.domain['id'],
+                'invalid_option': invalid_option},
+            body={'config': new_config},
+            expected_status=403)
+        # Trying to update a valid option, but one that is not in the
+        # current config, should result in NotFound.
+        new_config = {'suffix': uuid.uuid4().hex}
+        self.patch(
+            '/domains/%(domain_id)s/config/ldap/suffix' % {
+                'domain_id': self.domain['id']},
+            body={'config': new_config},
+            expected_status=404)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py b/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py
new file mode 100644 (file)
index 0000000..437fb15
--- /dev/null
@@ -0,0 +1,251 @@
+# Copyright 2014 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from testtools import matchers
+
+from keystone.tests.unit import test_v3
+
+
+class TestExtensionCase(test_v3.RestfulTestCase):
+
+    EXTENSION_NAME = 'endpoint_policy'
+    EXTENSION_TO_ADD = 'endpoint_policy_extension'
+
+
+class EndpointPolicyTestCase(TestExtensionCase):
+    """Test endpoint policy CRUD.
+
+    In general, the controller layer of the endpoint policy extension just
+    marshals data to and from the underlying manager calls. Given that the
+    manager layer is tested in depth by the backend tests, the tests here
+    concentrate on ensuring that we pass and present the data correctly.
+
+    """
+
+    def setUp(self):
+        super(EndpointPolicyTestCase, self).setUp()
+        self.policy = self.new_policy_ref()
+        self.policy_api.create_policy(self.policy['id'], self.policy)
+        self.service = self.new_service_ref()
+        self.catalog_api.create_service(self.service['id'], self.service)
+        self.endpoint = self.new_endpoint_ref(self.service['id'], enabled=True)
+        self.catalog_api.create_endpoint(self.endpoint['id'], self.endpoint)
+        self.region = self.new_region_ref()
+        self.catalog_api.create_region(self.region)
+
+    def assert_head_and_get_return_same_response(self, url, expected_status):
+        self.get(url, expected_status=expected_status)
+        self.head(url, expected_status=expected_status)
+
+    # endpoint policy crud tests
+    def _crud_test(self, url):
+        # Checking before the resource exists also ensures that the
+        # positive result after creation is not spurious.
+
+        self.assert_head_and_get_return_same_response(url, expected_status=404)
+
+        self.put(url, expected_status=204)
+
+        # test that the new resource is accessible.
+        self.assert_head_and_get_return_same_response(url, expected_status=204)
+
+        self.delete(url, expected_status=204)
+
+        # test that the deleted resource is no longer accessible
+        self.assert_head_and_get_return_same_response(url, expected_status=404)
+
+    def test_crud_for_policy_for_explicit_endpoint(self):
+        """PUT, HEAD and DELETE for explicit endpoint policy."""
+
+        url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+               '/endpoints/%(endpoint_id)s') % {
+                   'policy_id': self.policy['id'],
+                   'endpoint_id': self.endpoint['id']}
+        self._crud_test(url)
+
+    def test_crud_for_policy_for_service(self):
+        """PUT, HEAD and DELETE for service endpoint policy."""
+
+        url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+               '/services/%(service_id)s') % {
+                   'policy_id': self.policy['id'],
+                   'service_id': self.service['id']}
+        self._crud_test(url)
+
+    def test_crud_for_policy_for_region_and_service(self):
+        """PUT, HEAD and DELETE for region and service endpoint policy."""
+
+        url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+               '/services/%(service_id)s/regions/%(region_id)s') % {
+                   'policy_id': self.policy['id'],
+                   'service_id': self.service['id'],
+                   'region_id': self.region['id']}
+        self._crud_test(url)
+
+    def test_get_policy_for_endpoint(self):
+        """GET /endpoints/{endpoint_id}/policy."""
+
+        self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'policy_id': self.policy['id'],
+                     'endpoint_id': self.endpoint['id']},
+                 expected_status=204)
+
+        self.head('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY'
+                  '/policy' % {
+                      'endpoint_id': self.endpoint['id']},
+                  expected_status=200)
+
+        r = self.get('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY'
+                     '/policy' % {
+                         'endpoint_id': self.endpoint['id']},
+                     expected_status=200)
+        self.assertValidPolicyResponse(r, ref=self.policy)
+
+    def test_list_endpoints_for_policy(self):
+        """GET /policies/{policy_id}/endpoints."""
+
+        self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+                 '/endpoints/%(endpoint_id)s' % {
+                     'policy_id': self.policy['id'],
+                     'endpoint_id': self.endpoint['id']},
+                 expected_status=204)
+
+        r = self.get('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+                     '/endpoints' % {
+                         'policy_id': self.policy['id']},
+                     expected_status=200)
+        self.assertValidEndpointListResponse(r, ref=self.endpoint)
+        self.assertThat(r.result.get('endpoints'), matchers.HasLength(1))
+
+    def test_endpoint_association_cleanup_when_endpoint_deleted(self):
+        url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+               '/endpoints/%(endpoint_id)s') % {
+                   'policy_id': self.policy['id'],
+                   'endpoint_id': self.endpoint['id']}
+
+        self.put(url, expected_status=204)
+        self.head(url, expected_status=204)
+
+        self.delete('/endpoints/%(endpoint_id)s' % {
+            'endpoint_id': self.endpoint['id']})
+
+        self.head(url, expected_status=404)
+
+    def test_region_service_association_cleanup_when_region_deleted(self):
+        url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+               '/services/%(service_id)s/regions/%(region_id)s') % {
+                   'policy_id': self.policy['id'],
+                   'service_id': self.service['id'],
+                   'region_id': self.region['id']}
+
+        self.put(url, expected_status=204)
+        self.head(url, expected_status=204)
+
+        self.delete('/regions/%(region_id)s' % {
+            'region_id': self.region['id']})
+
+        self.head(url, expected_status=404)
+
+    def test_region_service_association_cleanup_when_service_deleted(self):
+        url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+               '/services/%(service_id)s/regions/%(region_id)s') % {
+                   'policy_id': self.policy['id'],
+                   'service_id': self.service['id'],
+                   'region_id': self.region['id']}
+
+        self.put(url, expected_status=204)
+        self.head(url, expected_status=204)
+
+        self.delete('/services/%(service_id)s' % {
+            'service_id': self.service['id']})
+
+        self.head(url, expected_status=404)
+
+    def test_service_association_cleanup_when_service_deleted(self):
+        url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+               '/services/%(service_id)s') % {
+                   'policy_id': self.policy['id'],
+                   'service_id': self.service['id']}
+
+        self.put(url, expected_status=204)
+        self.get(url, expected_status=204)
+
+        self.delete('/policies/%(policy_id)s' % {
+            'policy_id': self.policy['id']})
+
+        self.head(url, expected_status=404)
+
+    def test_service_association_cleanup_when_policy_deleted(self):
+        url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
+               '/services/%(service_id)s') % {
+                   'policy_id': self.policy['id'],
+                   'service_id': self.service['id']}
+
+        self.put(url, expected_status=204)
+        self.get(url, expected_status=204)
+
+        self.delete('/services/%(service_id)s' % {
+            'service_id': self.service['id']})
+
+        self.head(url, expected_status=404)
+
+
+class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
+    EXTENSION_LOCATION = ('http://docs.openstack.org/api/openstack-identity/3/'
+                          'ext/OS-ENDPOINT-POLICY/1.0/rel')
+    PARAM_LOCATION = 'http://docs.openstack.org/api/openstack-identity/3/param'
+
+    JSON_HOME_DATA = {
+        EXTENSION_LOCATION + '/endpoint_policy': {
+            'href-template': '/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/'
+                             'policy',
+            'href-vars': {
+                'endpoint_id': PARAM_LOCATION + '/endpoint_id',
+            },
+        },
+        EXTENSION_LOCATION + '/policy_endpoints': {
+            'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'
+                             'endpoints',
+            'href-vars': {
+                'policy_id': PARAM_LOCATION + '/policy_id',
+            },
+        },
+        EXTENSION_LOCATION + '/endpoint_policy_association': {
+            'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'
+                             'endpoints/{endpoint_id}',
+            'href-vars': {
+                'policy_id': PARAM_LOCATION + '/policy_id',
+                'endpoint_id': PARAM_LOCATION + '/endpoint_id',
+            },
+        },
+        EXTENSION_LOCATION + '/service_policy_association': {
+            'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'
+                             'services/{service_id}',
+            'href-vars': {
+                'policy_id': PARAM_LOCATION + '/policy_id',
+                'service_id': PARAM_LOCATION + '/service_id',
+            },
+        },
+        EXTENSION_LOCATION + '/region_and_service_policy_association': {
+            'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'
+                             'services/{service_id}/regions/{region_id}',
+            'href-vars': {
+                'policy_id': PARAM_LOCATION + '/policy_id',
+                'service_id': PARAM_LOCATION + '/service_id',
+                'region_id': PARAM_LOCATION + '/region_id',
+            },
+        },
+    }
diff --git a/keystone-moon/keystone/tests/unit/test_v3_federation.py b/keystone-moon/keystone/tests/unit/test_v3_federation.py
new file mode 100644 (file)
index 0000000..3b6f4d8
--- /dev/null
@@ -0,0 +1,3296 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import random
+import subprocess
+import uuid
+
+from lxml import etree
+import mock
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslotest import mockpatch
+import saml2
+from saml2 import saml
+from saml2 import sigver
+from six.moves import urllib
+import xmldsig
+
+from keystone.auth import controllers as auth_controllers
+from keystone.auth.plugins import mapped
+from keystone.contrib import federation
+from keystone.contrib.federation import controllers as federation_controllers
+from keystone.contrib.federation import idp as keystone_idp
+from keystone.contrib.federation import utils as mapping_utils
+from keystone import exception
+from keystone import notifications
+from keystone.tests.unit import core
+from keystone.tests.unit import federation_fixtures
+from keystone.tests.unit import ksfixtures
+from keystone.tests.unit import mapping_fixtures
+from keystone.tests.unit import test_v3
+from keystone.token.providers import common as token_common
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+ROOTDIR = os.path.dirname(os.path.abspath(__file__))
+XMLDIR = os.path.join(ROOTDIR, 'saml2/')
+
+
+def dummy_validator(*args, **kwargs):
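+    # No-op stand-in so tests can bypass real signature validation.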
+    pass
+
+
+class FederationTests(test_v3.RestfulTestCase):
+
+    EXTENSION_NAME = 'federation'
+    EXTENSION_TO_ADD = 'federation_extension'
+
+
+class FederatedSetupMixin(object):
+
+    ACTION = 'authenticate'
+    IDP = 'ORG_IDP'
+    PROTOCOL = 'saml2'
+    AUTH_METHOD = 'saml2'
+    USER = 'user@ORGANIZATION'
+    ASSERTION_PREFIX = 'PREFIX_'
+    IDP_WITH_REMOTE = 'ORG_IDP_REMOTE'
+    REMOTE_ID = 'entityID_IDP'
+    REMOTE_ID_ATTR = uuid.uuid4().hex
+
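+    # The 'identity' portion of a v3 federated authentication request.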
+    UNSCOPED_V3_SAML2_REQ = {
+        "identity": {
+            "methods": [AUTH_METHOD],
+            AUTH_METHOD: {
+                "identity_provider": IDP,
+                "protocol": PROTOCOL
+            }
+        }
+    }
+
+    def _check_domains_are_valid(self, token):
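+        # Federated users live in the special 'Federated' domain.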
+        self.assertEqual('Federated', token['user']['domain']['id'])
+        self.assertEqual('Federated', token['user']['domain']['name'])
+
+    def _project(self, project):
+        return (project['id'], project['name'])
+
+    def _roles(self, roles):
+        return set([(r['id'], r['name']) for r in roles])
+
+    def _check_projects_and_roles(self, token, roles, projects):
+        """Check whether the projects and the roles match."""
+        token_roles = token.get('roles')
+        if token_roles is None:
+            raise AssertionError('Roles not found in the token')
+        token_roles = self._roles(token_roles)
+        roles_ref = self._roles(roles)
+        self.assertEqual(token_roles, roles_ref)
+
+        token_projects = token.get('project')
+        if token_projects is None:
+            raise AssertionError('Projects not found in the token')
+        token_projects = self._project(token_projects)
+        projects_ref = self._project(projects)
+        self.assertEqual(token_projects, projects_ref)
+
+    def _check_scoped_token_attributes(self, token):
+        def xor_project_domain(iterable):
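+            # Truthy only when exactly one of 'project' and 'domain' is
+            # present, i.e. a logical XOR of the two membership tests.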
+            return sum(('project' in iterable, 'domain' in iterable)) % 2
+
+        for obj in ('user', 'catalog', 'expires_at', 'issued_at',
+                    'methods', 'roles'):
+            self.assertIn(obj, token)
+        # Check for either project or domain
+        if not xor_project_domain(token.keys()):
+            raise AssertionError("You must specify either "
+                                 "project or domain.")
+
+        self.assertIn('OS-FEDERATION', token['user'])
+        os_federation = token['user']['OS-FEDERATION']
+        self.assertEqual(self.IDP, os_federation['identity_provider']['id'])
+        self.assertEqual(self.PROTOCOL, os_federation['protocol']['id'])
+
+    def _issue_unscoped_token(self,
+                              idp=None,
+                              assertion='EMPLOYEE_ASSERTION',
+                              environment=None):
+        api = federation_controllers.Auth()
+        context = {'environment': environment or {}}
+        self._inject_assertion(context, assertion)
+        if idp is None:
+            idp = self.IDP
+        r = api.federated_authentication(context, idp, self.PROTOCOL)
+        return r
+
+    def idp_ref(self, id=None):
+        idp = {
+            'id': id or uuid.uuid4().hex,
+            'enabled': True,
+            'description': uuid.uuid4().hex
+        }
+        return idp
+
+    def proto_ref(self, mapping_id=None):
+        proto = {
+            'id': uuid.uuid4().hex,
+            'mapping_id': mapping_id or uuid.uuid4().hex
+        }
+        return proto
+
+    def mapping_ref(self, rules=None):
+        return {
+            'id': uuid.uuid4().hex,
+            'rules': rules or self.rules['rules']
+        }
+
+    def _scope_request(self, unscoped_token_id, scope, scope_id):
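+        # Build the body of a token request that exchanges an unscoped
+        # federated token for a project- or domain-scoped one.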
+        return {
+            'auth': {
+                'identity': {
+                    'methods': [
+                        self.AUTH_METHOD
+                    ],
+                    self.AUTH_METHOD: {
+                        'id': unscoped_token_id
+                    }
+                },
+                'scope': {
+                    scope: {
+                        'id': scope_id
+                    }
+                }
+            }
+        }
+
+    def _inject_assertion(self, context, variant, query_string=None):
+        assertion = getattr(mapping_fixtures, variant)
+        context['environment'].update(assertion)
+        context['query_string'] = query_string or []
+
+    def load_federation_sample_data(self):
+        """Inject additional data."""
+
+        # Create and add domains
+        self.domainA = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainA['id'],
+                                        self.domainA)
+
+        self.domainB = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainB['id'],
+                                        self.domainB)
+
+        self.domainC = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainC['id'],
+                                        self.domainC)
+
+        self.domainD = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainD['id'],
+                                        self.domainD)
+
+        # Create and add projects
+        self.proj_employees = self.new_project_ref(
+            domain_id=self.domainA['id'])
+        self.resource_api.create_project(self.proj_employees['id'],
+                                         self.proj_employees)
+        self.proj_customers = self.new_project_ref(
+            domain_id=self.domainA['id'])
+        self.resource_api.create_project(self.proj_customers['id'],
+                                         self.proj_customers)
+
+        self.project_all = self.new_project_ref(
+            domain_id=self.domainA['id'])
+        self.resource_api.create_project(self.project_all['id'],
+                                         self.project_all)
+
+        self.project_inherited = self.new_project_ref(
+            domain_id=self.domainD['id'])
+        self.resource_api.create_project(self.project_inherited['id'],
+                                         self.project_inherited)
+
+        # Create and add groups
+        self.group_employees = self.new_group_ref(
+            domain_id=self.domainA['id'])
+        self.group_employees = (
+            self.identity_api.create_group(self.group_employees))
+
+        self.group_customers = self.new_group_ref(
+            domain_id=self.domainA['id'])
+        self.group_customers = (
+            self.identity_api.create_group(self.group_customers))
+
+        self.group_admins = self.new_group_ref(
+            domain_id=self.domainA['id'])
+        self.group_admins = self.identity_api.create_group(self.group_admins)
+
+        # Create and add roles
+        self.role_employee = self.new_role_ref()
+        self.role_api.create_role(self.role_employee['id'], self.role_employee)
+        self.role_customer = self.new_role_ref()
+        self.role_api.create_role(self.role_customer['id'], self.role_customer)
+
+        self.role_admin = self.new_role_ref()
+        self.role_api.create_role(self.role_admin['id'], self.role_admin)
+
+        # Employees can access
+        # * proj_employees
+        # * project_all
+        self.assignment_api.create_grant(self.role_employee['id'],
+                                         group_id=self.group_employees['id'],
+                                         project_id=self.proj_employees['id'])
+        self.assignment_api.create_grant(self.role_employee['id'],
+                                         group_id=self.group_employees['id'],
+                                         project_id=self.project_all['id'])
+        # Customers can access
+        # * proj_customers
+        self.assignment_api.create_grant(self.role_customer['id'],
+                                         group_id=self.group_customers['id'],
+                                         project_id=self.proj_customers['id'])
+
+        # Admins can access:
+        # * proj_customers
+        # * proj_employees
+        # * project_all
+        self.assignment_api.create_grant(self.role_admin['id'],
+                                         group_id=self.group_admins['id'],
+                                         project_id=self.proj_customers['id'])
+        self.assignment_api.create_grant(self.role_admin['id'],
+                                         group_id=self.group_admins['id'],
+                                         project_id=self.proj_employees['id'])
+        self.assignment_api.create_grant(self.role_admin['id'],
+                                         group_id=self.group_admins['id'],
+                                         project_id=self.project_all['id'])
+
+        # Customers can access:
+        # * domain A
+        self.assignment_api.create_grant(self.role_customer['id'],
+                                         group_id=self.group_customers['id'],
+                                         domain_id=self.domainA['id'])
+
+        # Customers can access projects via inheritance:
+        # * domain D
+        self.assignment_api.create_grant(self.role_customer['id'],
+                                         group_id=self.group_customers['id'],
+                                         domain_id=self.domainD['id'],
+                                         inherited_to_projects=True)
+
+        # Employees can access:
+        # * domain A
+        # * domain B
+
+        self.assignment_api.create_grant(self.role_employee['id'],
+                                         group_id=self.group_employees['id'],
+                                         domain_id=self.domainA['id'])
+        self.assignment_api.create_grant(self.role_employee['id'],
+                                         group_id=self.group_employees['id'],
+                                         domain_id=self.domainB['id'])
+
+        # Admins can access:
+        # * domain A
+        # * domain B
+        # * domain C
+        self.assignment_api.create_grant(self.role_admin['id'],
+                                         group_id=self.group_admins['id'],
+                                         domain_id=self.domainA['id'])
+        self.assignment_api.create_grant(self.role_admin['id'],
+                                         group_id=self.group_admins['id'],
+                                         domain_id=self.domainB['id'])
+
+        self.assignment_api.create_grant(self.role_admin['id'],
+                                         group_id=self.group_admins['id'],
+                                         domain_id=self.domainC['id'])
+        self.rules = {
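+            # Each rule's 'remote' clauses match incoming assertion
+            # attributes; its 'local' clauses name the user and group
+            # memberships that a successful match yields.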
+            'rules': [
+                {
+                    'local': [
+                        {
+                            'group': {
+                                'id': self.group_employees['id']
+                            }
+                        },
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        }
+                    ],
+                    'remote': [
+                        {
+                            'type': 'UserName'
+                        },
+                        {
+                            'type': 'orgPersonType',
+                            'any_one_of': [
+                                'Employee'
+                            ]
+                        }
+                    ]
+                },
+                {
+                    'local': [
+                        {
+                            'group': {
+                                'id': self.group_employees['id']
+                            }
+                        },
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        }
+                    ],
+                    'remote': [
+                        {
+                            'type': self.ASSERTION_PREFIX + 'UserName'
+                        },
+                        {
+                            'type': self.ASSERTION_PREFIX + 'orgPersonType',
+                            'any_one_of': [
+                                'SuperEmployee'
+                            ]
+                        }
+                    ]
+                },
+                {
+                    'local': [
+                        {
+                            'group': {
+                                'id': self.group_customers['id']
+                            }
+                        },
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        }
+                    ],
+                    'remote': [
+                        {
+                            'type': 'UserName'
+                        },
+                        {
+                            'type': 'orgPersonType',
+                            'any_one_of': [
+                                'Customer'
+                            ]
+                        }
+                    ]
+                },
+                {
+                    'local': [
+                        {
+                            'group': {
+                                'id': self.group_admins['id']
+                            }
+                        },
+                        {
+                            'group': {
+                                'id': self.group_employees['id']
+                            }
+                        },
+                        {
+                            'group': {
+                                'id': self.group_customers['id']
+                            }
+                        },
+
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        }
+                    ],
+                    'remote': [
+                        {
+                            'type': 'UserName'
+                        },
+                        {
+                            'type': 'orgPersonType',
+                            'any_one_of': [
+                                'Admin',
+                                'Chief'
+                            ]
+                        }
+                    ]
+                },
+                {
+                    'local': [
+                        {
+                            'group': {
+                                'id': uuid.uuid4().hex
+                            }
+                        },
+                        {
+                            'group': {
+                                'id': self.group_customers['id']
+                            }
+                        },
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        }
+                    ],
+                    'remote': [
+                        {
+                            'type': 'UserName',
+                        },
+                        {
+                            'type': 'FirstName',
+                            'any_one_of': [
+                                'Jill'
+                            ]
+                        },
+                        {
+                            'type': 'LastName',
+                            'any_one_of': [
+                                'Smith'
+                            ]
+                        }
+                    ]
+                },
+                {
+                    'local': [
+                        {
+                            'group': {
+                                'id': 'this_group_no_longer_exists'
+                            }
+                        },
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        }
+                    ],
+                    'remote': [
+                        {
+                            'type': 'UserName',
+                        },
+                        {
+                            'type': 'Email',
+                            'any_one_of': [
+                                'testacct@example.com'
+                            ]
+                        },
+                        {
+                            'type': 'orgPersonType',
+                            'any_one_of': [
+                                'Tester'
+                            ]
+                        }
+                    ]
+                },
+                # rules with local group names
+                {
+                    "local": [
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        },
+                        {
+                            "group": {
+                                "name": self.group_customers['name'],
+                                "domain": {
+                                    "name": self.domainA['name']
+                                }
+                            }
+                        }
+                    ],
+                    "remote": [
+                        {
+                            'type': 'UserName',
+                        },
+                        {
+                            "type": "orgPersonType",
+                            "any_one_of": [
+                                "CEO",
+                                "CTO"
+                            ],
+                        }
+                    ]
+                },
+                {
+                    "local": [
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        },
+                        {
+                            "group": {
+                                "name": self.group_admins['name'],
+                                "domain": {
+                                    "id": self.domainA['id']
+                                }
+                            }
+                        }
+                    ],
+                    "remote": [
+                        {
+                            "type": "UserName",
+                        },
+                        {
+                            "type": "orgPersonType",
+                            "any_one_of": [
+                                "Managers"
+                            ]
+                        }
+                    ]
+                },
+                {
+                    "local": [
+                        {
+                            "user": {
+                                "name": "{0}"
+                            }
+                        },
+                        {
+                            "group": {
+                                "name": "NON_EXISTING",
+                                "domain": {
+                                    "id": self.domainA['id']
+                                }
+                            }
+                        }
+                    ],
+                    "remote": [
+                        {
+                            "type": "UserName",
+                        },
+                        {
+                            "type": "UserName",
+                            "any_one_of": [
+                                "IamTester"
+                            ]
+                        }
+                    ]
+                },
+                {
+                    "local": [
+                        {
+                            "user": {
+                                "type": "local",
+                                "name": self.user['name'],
+                                "domain": {
+                                    "id": self.user['domain_id']
+                                }
+                            }
+                        },
+                        {
+                            "group": {
+                                "id": self.group_customers['id']
+                            }
+                        }
+                    ],
+                    "remote": [
+                        {
+                            "type": "UserType",
+                            "any_one_of": [
+                                "random"
+                            ]
+                        }
+                    ]
+                },
+                {
+                    "local": [
+                        {
+                            "user": {
+                                "type": "local",
+                                "name": self.user['name'],
+                                "domain": {
+                                    "id": uuid.uuid4().hex
+                                }
+                            }
+                        }
+                    ],
+                    "remote": [
+                        {
+                            "type": "Position",
+                            "any_one_of": [
+                                "DirectorGeneral"
+                            ]
+                        }
+                    ]
+                }
+            ]
+        }
+
+        # Add IDP
+        self.idp = self.idp_ref(id=self.IDP)
+        self.federation_api.create_idp(self.idp['id'],
+                                       self.idp)
+        # Add IDP with remote
+        self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE)
+        self.idp_with_remote['remote_id'] = self.REMOTE_ID
+        self.federation_api.create_idp(self.idp_with_remote['id'],
+                                       self.idp_with_remote)
+        # Add a mapping
+        self.mapping = self.mapping_ref()
+        self.federation_api.create_mapping(self.mapping['id'],
+                                           self.mapping)
+        # Add protocols
+        self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
+        self.proto_saml['id'] = self.PROTOCOL
+        self.federation_api.create_protocol(self.idp['id'],
+                                            self.proto_saml['id'],
+                                            self.proto_saml)
+        # Add protocols IDP with remote
+        self.federation_api.create_protocol(self.idp_with_remote['id'],
+                                            self.proto_saml['id'],
+                                            self.proto_saml)
+        # Generate fake tokens
+        context = {'environment': {}}
+
+        self.tokens = {}
+        VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
+                    'ADMIN_ASSERTION')
+        api = auth_controllers.Auth()
+        for variant in VARIANTS:
+            self._inject_assertion(context, variant)
+            r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
+            self.tokens[variant] = r.headers.get('X-Subject-Token')
+
+        self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
+            uuid.uuid4().hex, 'project', self.proj_customers['id'])
+
+        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
+            self.tokens['EMPLOYEE_ASSERTION'], 'project',
+            self.proj_employees['id'])
+
+        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
+            self.tokens['ADMIN_ASSERTION'], 'project',
+            self.proj_employees['id'])
+
+        self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
+            self.tokens['ADMIN_ASSERTION'], 'project',
+            self.proj_customers['id'])
+
+        self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
+            self.tokens['CUSTOMER_ASSERTION'], 'project',
+            self.proj_employees['id'])
+
+        self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER = self._scope_request(
+            self.tokens['CUSTOMER_ASSERTION'], 'project',
+            self.project_inherited['id'])
+
+        self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
+            self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
+
+        self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
+            self.tokens['CUSTOMER_ASSERTION'], 'domain',
+            self.domainB['id'])
+
+        self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER = self._scope_request(
+            self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainD['id'])
+
+        self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
+            self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
+
+        self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
+            self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
+
+        self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
+            self.tokens['ADMIN_ASSERTION'], 'domain',
+            self.domainC['id'])
+
+
+class FederatedIdentityProviderTests(FederationTests):
+    """A test class for Identity Providers."""
+
+    idp_keys = ['description', 'enabled']
+
+    default_body = {'description': None, 'enabled': True}
+
+    def base_url(self, suffix=None):
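+        """Return the IdP collection URL, or a member URL if a suffix is given."""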
+        if suffix is not None:
+            return '/OS-FEDERATION/identity_providers/' + str(suffix)
+        return '/OS-FEDERATION/identity_providers'
+
+    def _fetch_attribute_from_response(self, resp, parameter,
+                                       assert_is_not_none=True):
+        """Fetch single attribute from TestResponse object."""
+        result = resp.result.get(parameter)
+        if assert_is_not_none:
+            self.assertIsNotNone(result)
+        return result
+
+    def _create_and_decapsulate_response(self, body=None):
+        """Create IdP and fetch it's random id along with entity."""
+        default_resp = self._create_default_idp(body=body)
+        idp = self._fetch_attribute_from_response(default_resp,
+                                                  'identity_provider')
+        self.assertIsNotNone(idp)
+        idp_id = idp.get('id')
+        return (idp_id, idp)
+
+    def _get_idp(self, idp_id):
+        """Fetch IdP entity based on its id."""
+        url = self.base_url(suffix=idp_id)
+        resp = self.get(url)
+        return resp
+
+    def _create_default_idp(self, body=None):
+        """Create default IdP."""
+        url = self.base_url(suffix=uuid.uuid4().hex)
+        if body is None:
+            body = self._http_idp_input()
+        resp = self.put(url, body={'identity_provider': body},
+                        expected_status=201)
+        return resp
+
+    def _http_idp_input(self, **kwargs):
+        """Create default input for IdP data."""
+        body = None
+        if 'body' not in kwargs:
+            body = self.default_body.copy()
+            body['description'] = uuid.uuid4().hex
+        else:
+            body = kwargs['body']
+        return body
+
+    def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
+                                mapping_id=None, validate=True, **kwargs):
+        if url is None:
+            url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
+        if idp_id is None:
+            idp_id, _ = self._create_and_decapsulate_response()
+        if proto is None:
+            proto = uuid.uuid4().hex
+        if mapping_id is None:
+            mapping_id = uuid.uuid4().hex
+        body = {'mapping_id': mapping_id}
+        url = url % {'idp_id': idp_id, 'protocol_id': proto}
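+        # Illustrative request shape (for reference): this issues
+        #   PUT /OS-FEDERATION/identity_providers/<idp_id>/protocols/<proto>
+        # with body {'protocol': {'mapping_id': <mapping_id>}}.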
+        resp = self.put(url, body={'protocol': body}, **kwargs)
+        if validate:
+            self.assertValidResponse(resp, 'protocol', dummy_validator,
+                                     keys_to_check=['id', 'mapping_id'],
+                                     ref={'id': proto,
+                                          'mapping_id': mapping_id})
+        return (resp, idp_id, proto)
+
+    def _get_protocol(self, idp_id, protocol_id):
+        url = "%s/protocols/%s" % (idp_id, protocol_id)
+        url = self.base_url(suffix=url)
+        r = self.get(url)
+        return r
+
+    def test_create_idp(self):
+        """Creates the IdentityProvider entity."""
+
+        keys_to_check = self.idp_keys
+        body = self._http_idp_input()
+        resp = self._create_default_idp(body=body)
+        self.assertValidResponse(resp, 'identity_provider', dummy_validator,
+                                 keys_to_check=keys_to_check,
+                                 ref=body)
+
+    def test_create_idp_remote(self):
+        """Creates the IdentityProvider entity associated to a remote_id."""
+
+        keys_to_check = list(self.idp_keys)
+        keys_to_check.append('remote_id')
+        body = self.default_body.copy()
+        body['description'] = uuid.uuid4().hex
+        body['remote_id'] = uuid.uuid4().hex
+        resp = self._create_default_idp(body=body)
+        self.assertValidResponse(resp, 'identity_provider', dummy_validator,
+                                 keys_to_check=keys_to_check,
+                                 ref=body)
+
+    def test_list_idps(self, iterations=5):
+        """Lists all available IdentityProviders.
+
+        This test collects the ids of the IdPs it creates and
+        intersects them with the list of all available IdPs.
+        The full list can be a superset of the IdPs created in this test,
+        because other tests also create IdPs.
+
+        """
+        def get_id(resp):
+            r = self._fetch_attribute_from_response(resp,
+                                                    'identity_provider')
+            return r.get('id')
+
+        ids = []
+        for _ in range(iterations):
+            ids.append(get_id(self._create_default_idp()))
+        ids = set(ids)
+
+        keys_to_check = self.idp_keys
+        url = self.base_url()
+        resp = self.get(url)
+        self.assertValidListResponse(resp, 'identity_providers',
+                                     dummy_validator,
+                                     keys_to_check=keys_to_check)
+        entities = self._fetch_attribute_from_response(resp,
+                                                       'identity_providers')
+        entities_ids = set([e['id'] for e in entities])
+        ids_intersection = entities_ids.intersection(ids)
+        self.assertEqual(ids_intersection, ids)
+
+    def test_check_idp_uniqueness(self):
+        """Add same IdP twice.
+
+        Expect HTTP 409 code for the latter call.
+
+        """
+        url = self.base_url(suffix=uuid.uuid4().hex)
+        body = self._http_idp_input()
+        self.put(url, body={'identity_provider': body},
+                 expected_status=201)
+        self.put(url, body={'identity_provider': body},
+                 expected_status=409)
+
+    def test_get_idp(self):
+        """Create and later fetch IdP."""
+        body = self._http_idp_input()
+        default_resp = self._create_default_idp(body=body)
+        default_idp = self._fetch_attribute_from_response(default_resp,
+                                                          'identity_provider')
+        idp_id = default_idp.get('id')
+        url = self.base_url(suffix=idp_id)
+        resp = self.get(url)
+        self.assertValidResponse(resp, 'identity_provider',
+                                 dummy_validator, keys_to_check=body.keys(),
+                                 ref=body)
+
+    def test_get_nonexisting_idp(self):
+        """Fetch nonexisting IdP entity.
+
+        Expected HTTP 404 status code.
+
+        """
+        idp_id = uuid.uuid4().hex
+        self.assertIsNotNone(idp_id)
+
+        url = self.base_url(suffix=idp_id)
+        self.get(url, expected_status=404)
+
+    def test_delete_existing_idp(self):
+        """Create and later delete IdP.
+
+        Expect HTTP 404 for the GET IdP call.
+        """
+        default_resp = self._create_default_idp()
+        default_idp = self._fetch_attribute_from_response(default_resp,
+                                                          'identity_provider')
+        idp_id = default_idp.get('id')
+        self.assertIsNotNone(idp_id)
+        url = self.base_url(suffix=idp_id)
+        self.delete(url)
+        self.get(url, expected_status=404)
+
+    def test_delete_nonexisting_idp(self):
+        """Delete nonexisting IdP.
+
+        Expect HTTP 404 for the GET IdP call.
+        """
+        idp_id = uuid.uuid4().hex
+        url = self.base_url(suffix=idp_id)
+        self.delete(url, expected_status=404)
+
+    def test_update_idp_mutable_attributes(self):
+        """Update IdP's mutable parameters."""
+        default_resp = self._create_default_idp()
+        default_idp = self._fetch_attribute_from_response(default_resp,
+                                                          'identity_provider')
+        idp_id = default_idp.get('id')
+        url = self.base_url(suffix=idp_id)
+        self.assertIsNotNone(idp_id)
+
+        _enabled = not default_idp.get('enabled')
+        body = {'remote_id': uuid.uuid4().hex,
+                'description': uuid.uuid4().hex,
+                'enabled': _enabled}
+
+        body = {'identity_provider': body}
+        resp = self.patch(url, body=body)
+        updated_idp = self._fetch_attribute_from_response(resp,
+                                                          'identity_provider')
+        body = body['identity_provider']
+        for key in body.keys():
+            self.assertEqual(body[key], updated_idp.get(key))
+
+        resp = self.get(url)
+        updated_idp = self._fetch_attribute_from_response(resp,
+                                                          'identity_provider')
+        for key in body.keys():
+            self.assertEqual(body[key], updated_idp.get(key))
+
+    def test_update_idp_immutable_attributes(self):
+        """Update IdP's immutable parameters.
+
+        Expect HTTP 403 code.
+
+        """
+        default_resp = self._create_default_idp()
+        default_idp = self._fetch_attribute_from_response(default_resp,
+                                                          'identity_provider')
+        idp_id = default_idp.get('id')
+        self.assertIsNotNone(idp_id)
+
+        body = self._http_idp_input()
+        body['id'] = uuid.uuid4().hex
+        body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]
+
+        url = self.base_url(suffix=idp_id)
+        self.patch(url, body={'identity_provider': body}, expected_status=403)
+
+    def test_update_nonexistent_idp(self):
+        """Update nonexistent IdP
+
+        Expect HTTP 404 code.
+
+        """
+        idp_id = uuid.uuid4().hex
+        url = self.base_url(suffix=idp_id)
+        body = self._http_idp_input()
+        body['enabled'] = False
+        body = {'identity_provider': body}
+
+        self.patch(url, body=body, expected_status=404)
+
+    def test_assign_protocol_to_idp(self):
+        """Assign a protocol to existing IdP."""
+
+        self._assign_protocol_to_idp(expected_status=201)
+
+    def test_protocol_composite_pk(self):
+        """Test whether Keystone let's add two entities with identical
+        names, however attached to different IdPs.
+
+        1. Add IdP and assign it protocol with predefined name
+        2. Add another IdP and assign it a protocol with same name.
+
+        Expect HTTP 201 code
+
+        """
+        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
+
+        kwargs = {'expected_status': 201}
+        self._assign_protocol_to_idp(proto='saml2',
+                                     url=url, **kwargs)
+
+        self._assign_protocol_to_idp(proto='saml2',
+                                     url=url, **kwargs)
+
+    def test_protocol_idp_pk_uniqueness(self):
+        """Test whether Keystone checks for unique idp/protocol values.
+
+        Add the same protocol twice; expect Keystone to reject the latter call and
+        return HTTP 409 code.
+
+        """
+        url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
+
+        kwargs = {'expected_status': 201}
+        resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
+                                                           url=url, **kwargs)
+        kwargs = {'expected_status': 409}
+        resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
+                                                           proto='saml2',
+                                                           validate=False,
+                                                           url=url, **kwargs)
+
+    def test_assign_protocol_to_nonexistent_idp(self):
+        """Assign protocol to IdP that doesn't exist.
+
+        Expect HTTP 404 code.
+
+        """
+
+        idp_id = uuid.uuid4().hex
+        kwargs = {'expected_status': 404}
+        self._assign_protocol_to_idp(proto='saml2',
+                                     idp_id=idp_id,
+                                     validate=False,
+                                     **kwargs)
+
+    def test_get_protocol(self):
+        """Create and later fetch protocol tied to IdP."""
+
+        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
+        proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
+        url = "%s/protocols/%s" % (idp_id, proto_id)
+        url = self.base_url(suffix=url)
+
+        resp = self.get(url)
+
+        reference = {'id': proto_id}
+        self.assertValidResponse(resp, 'protocol',
+                                 dummy_validator,
+                                 keys_to_check=reference.keys(),
+                                 ref=reference)
+
+    def test_list_protocols(self):
+        """Create set of protocols and later list them.
+
+        Compare input and output id sets.
+
+        """
+        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
+        iterations = random.randint(0, 16)
+        protocol_ids = []
+        for _ in range(iterations):
+            resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
+                                                          expected_status=201)
+            proto_id = self._fetch_attribute_from_response(resp, 'protocol')
+            proto_id = proto_id['id']
+            protocol_ids.append(proto_id)
+
+        url = "%s/protocols" % idp_id
+        url = self.base_url(suffix=url)
+        resp = self.get(url)
+        self.assertValidListResponse(resp, 'protocols',
+                                     dummy_validator,
+                                     keys_to_check=['id'])
+        entities = self._fetch_attribute_from_response(resp, 'protocols')
+        entities = set([entity['id'] for entity in entities])
+        protocols_intersection = entities.intersection(protocol_ids)
+        self.assertEqual(protocols_intersection, set(protocol_ids))
+
+    def test_update_protocols_attribute(self):
+        """Update protocol's attribute."""
+
+        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
+        new_mapping_id = uuid.uuid4().hex
+
+        url = "%s/protocols/%s" % (idp_id, proto)
+        url = self.base_url(suffix=url)
+        body = {'mapping_id': new_mapping_id}
+        resp = self.patch(url, body={'protocol': body})
+        self.assertValidResponse(resp, 'protocol', dummy_validator,
+                                 keys_to_check=['id', 'mapping_id'],
+                                 ref={'id': proto,
+                                      'mapping_id': new_mapping_id}
+                                 )
+
+    def test_delete_protocol(self):
+        """Delete protocol.
+
+        Expect HTTP 404 code for the GET call after the protocol is deleted.
+
+        """
+        url = self.base_url(suffix='%(idp_id)s/'
+                                   'protocols/%(protocol_id)s')
+        resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
+        url = url % {'idp_id': idp_id,
+                     'protocol_id': proto}
+        self.delete(url)
+        self.get(url, expected_status=404)
+
+
+class MappingCRUDTests(FederationTests):
+    """A class for testing CRUD operations for Mappings."""
+
+    MAPPING_URL = '/OS-FEDERATION/mappings/'
+
+    def assertValidMappingListResponse(self, resp, *args, **kwargs):
+        return self.assertValidListResponse(
+            resp,
+            'mappings',
+            self.assertValidMapping,
+            keys_to_check=[],
+            *args,
+            **kwargs)
+
+    def assertValidMappingResponse(self, resp, *args, **kwargs):
+        return self.assertValidResponse(
+            resp,
+            'mapping',
+            self.assertValidMapping,
+            keys_to_check=[],
+            *args,
+            **kwargs)
+
+    def assertValidMapping(self, entity, ref=None):
+        self.assertIsNotNone(entity.get('id'))
+        self.assertIsNotNone(entity.get('rules'))
+        if ref:
+            self.assertEqual(jsonutils.loads(entity['rules']), ref['rules'])
+        return entity
+
+    def _create_default_mapping_entry(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        resp = self.put(url,
+                        body={'mapping': mapping_fixtures.MAPPING_LARGE},
+                        expected_status=201)
+        return resp
+
+    def _get_id_from_response(self, resp):
+        r = resp.result.get('mapping')
+        return r.get('id')
+
+    def test_mapping_create(self):
+        resp = self._create_default_mapping_entry()
+        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
+
+    def test_mapping_list(self):
+        url = self.MAPPING_URL
+        self._create_default_mapping_entry()
+        resp = self.get(url)
+        entities = resp.result.get('mappings')
+        self.assertIsNotNone(entities)
+        self.assertResponseStatus(resp, 200)
+        self.assertValidListLinks(resp.result.get('links'))
+        self.assertEqual(1, len(entities))
+
+    def test_mapping_delete(self):
+        url = self.MAPPING_URL + '%(mapping_id)s'
+        resp = self._create_default_mapping_entry()
+        mapping_id = self._get_id_from_response(resp)
+        url = url % {'mapping_id': str(mapping_id)}
+        resp = self.delete(url)
+        self.assertResponseStatus(resp, 204)
+        self.get(url, expected_status=404)
+
+    def test_mapping_get(self):
+        url = self.MAPPING_URL + '%(mapping_id)s'
+        resp = self._create_default_mapping_entry()
+        mapping_id = self._get_id_from_response(resp)
+        url = url % {'mapping_id': mapping_id}
+        resp = self.get(url)
+        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
+
+    def test_mapping_update(self):
+        url = self.MAPPING_URL + '%(mapping_id)s'
+        resp = self._create_default_mapping_entry()
+        mapping_id = self._get_id_from_response(resp)
+        url = url % {'mapping_id': mapping_id}
+        resp = self.patch(url,
+                          body={'mapping': mapping_fixtures.MAPPING_SMALL})
+        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
+        resp = self.get(url)
+        self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
+
+    def test_delete_mapping_dne(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.delete(url, expected_status=404)
+
+    def test_get_mapping_dne(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.get(url, expected_status=404)
+
+    def test_create_mapping_bad_requirements(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})
+
+    def test_create_mapping_no_rules(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': mapping_fixtures.MAPPING_NO_RULES})
+
+    def test_create_mapping_no_remote_objects(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})
+
+    def test_create_mapping_bad_value(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})
+
+    def test_create_mapping_missing_local(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})
+
+    def test_create_mapping_missing_type(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})
+
+    def test_create_mapping_wrong_type(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})
+
+    def test_create_mapping_extra_remote_properties_not_any_of(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
+        self.put(url, expected_status=400, body={'mapping': mapping})
+
+    def test_create_mapping_extra_remote_properties_any_one_of(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
+        self.put(url, expected_status=400, body={'mapping': mapping})
+
+    def test_create_mapping_extra_remote_properties_just_type(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
+        self.put(url, expected_status=400, body={'mapping': mapping})
+
+    def test_create_mapping_empty_map(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': {}})
+
+    def test_create_mapping_extra_rules_properties(self):
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        self.put(url, expected_status=400,
+                 body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
+
+    def test_create_mapping_with_blacklist_and_whitelist(self):
+        """Test for adding whitelist and blacklist in the rule
+
+        Server should respond with HTTP 400 error upon discovering both
+        ``whitelist`` and ``blacklist`` keywords in the same rule.
+
+        """
+        url = self.MAPPING_URL + uuid.uuid4().hex
+        mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_AND_BLACKLIST
+        self.put(url, expected_status=400, body={'mapping': mapping})
+
+
+class MappingRuleEngineTests(FederationTests):
+    """A class for testing the mapping rule engine."""
+
+    def assertValidMappedUserObject(self, mapped_properties,
+                                    user_type='ephemeral',
+                                    domain_id=None):
+        """Check whether mapped properties object has 'user' within.
+
+        According to today's rules, RuleProcessor does not have to issue user's
+        id or name. What's actually required is user's type and for ephemeral
+        users that would be service domain named 'Federated'.
+        """
+        self.assertIn('user', mapped_properties,
+                      message='Missing user object in mapped properties')
+        user = mapped_properties['user']
+        self.assertIn('type', user)
+        self.assertEqual(user_type, user['type'])
+        self.assertIn('domain', user)
+        domain = user['domain']
+        domain_name_or_id = domain.get('id') or domain.get('name')
+        domain_ref = domain_id or federation.FEDERATED_DOMAIN_KEYWORD
+        self.assertEqual(domain_ref, domain_name_or_id)
+
+    def test_rule_engine_any_one_of_and_direct_mapping(self):
+        """Should return user's name and group id EMPLOYEE_GROUP_ID.
+
+        The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
+        This tests the case where `any_one_of` is valid, and there is
+        a direct mapping for the user's name.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_LARGE
+        assertion = mapping_fixtures.ADMIN_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        values = rp.process(assertion)
+
+        fn = assertion.get('FirstName')
+        ln = assertion.get('LastName')
+        full_name = '%s %s' % (fn, ln)
+        group_ids = values.get('group_ids')
+        user_name = values.get('user', {}).get('name')
+
+        self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
+        self.assertEqual(full_name, user_name)
+
+    def test_rule_engine_no_regex_match(self):
+        """Should deny authorization, the email of the tester won't match.
+
+        This will not match since the email in the assertion will fail
+        the regex test. It is set to match any @example.com address.
+        But the incoming value is set to eviltester@example.org.
+        RuleProcessor should return list of empty group_ids.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_LARGE
+        assertion = mapping_fixtures.BAD_TESTER_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        mapped_properties = rp.process(assertion)
+
+        self.assertValidMappedUserObject(mapped_properties)
+        self.assertIsNone(mapped_properties['user'].get('name'))
+        self.assertListEqual(list(), mapped_properties['group_ids'])
+
+    def test_rule_engine_regex_many_groups(self):
+        """Should return group CONTRACTOR_GROUP_ID.
+
+        The TESTER_ASSERTION should successfully have a match in
+        MAPPING_TESTER_REGEX. This will test the case where many groups
+        are in the assertion, and a regex value is used to try and find
+        a match.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_TESTER_REGEX
+        assertion = mapping_fixtures.TESTER_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        values = rp.process(assertion)
+
+        self.assertValidMappedUserObject(values)
+        user_name = assertion.get('UserName')
+        group_ids = values.get('group_ids')
+        name = values.get('user', {}).get('name')
+
+        self.assertEqual(user_name, name)
+        self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
+
+    def test_rule_engine_any_one_of_many_rules(self):
+        """Should return group CONTRACTOR_GROUP_ID.
+
+        The CONTRACTOR_ASSERTION should successfully have a match in
+        MAPPING_SMALL. This will test the case where many rules
+        must be matched, including an `any_one_of`, and a direct
+        mapping.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_SMALL
+        assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        values = rp.process(assertion)
+
+        self.assertValidMappedUserObject(values)
+        user_name = assertion.get('UserName')
+        group_ids = values.get('group_ids')
+        name = values.get('user', {}).get('name')
+
+        self.assertEqual(user_name, name)
+        self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)
+
+    def test_rule_engine_not_any_of_and_direct_mapping(self):
+        """Should return user's name and email.
+
+        The CUSTOMER_ASSERTION should successfully have a match in
+        MAPPING_LARGE. This will test the case where a requirement
+        has `not_any_of`, and direct mapping to a username, no group.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_LARGE
+        assertion = mapping_fixtures.CUSTOMER_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        values = rp.process(assertion)
+
+        self.assertValidMappedUserObject(values)
+        user_name = assertion.get('UserName')
+        group_ids = values.get('group_ids')
+        name = values.get('user', {}).get('name')
+
+        self.assertEqual(user_name, name)
+        self.assertEqual([], group_ids)
+
+    def test_rule_engine_not_any_of_many_rules(self):
+        """Should return group EMPLOYEE_GROUP_ID.
+
+        The EMPLOYEE_ASSERTION should successfully have a match in
+        MAPPING_SMALL. This will test the case where many remote
+        rules must be matched, including a `not_any_of`.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_SMALL
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        values = rp.process(assertion)
+
+        self.assertValidMappedUserObject(values)
+        user_name = assertion.get('UserName')
+        group_ids = values.get('group_ids')
+        name = values.get('user', {}).get('name')
+
+        self.assertEqual(user_name, name)
+        self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
+
+    def test_rule_engine_not_any_of_regex_verify_pass(self):
+        """Should return group DEVELOPER_GROUP_ID.
+
+        The DEVELOPER_ASSERTION should successfully have a match in
+        MAPPING_DEVELOPER_REGEX. This will test the case where many
+        remote rules must be matched, including a `not_any_of`, with
+        regex set to True.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
+        assertion = mapping_fixtures.DEVELOPER_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        values = rp.process(assertion)
+
+        self.assertValidMappedUserObject(values)
+        user_name = assertion.get('UserName')
+        group_ids = values.get('group_ids')
+        name = values.get('user', {}).get('name')
+
+        self.assertEqual(user_name, name)
+        self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
+
+    def test_rule_engine_not_any_of_regex_verify_fail(self):
+        """Should deny authorization.
+
+        The email in the assertion will fail the regex test.
+        It is set to reject any @example.org address, but the
+        incoming value is set to evildeveloper@example.org.
+        RuleProcessor should return an empty list of group_ids.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
+        assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        mapped_properties = rp.process(assertion)
+
+        self.assertValidMappedUserObject(mapped_properties)
+        self.assertIsNone(mapped_properties['user'].get('name'))
+        self.assertListEqual(list(), mapped_properties['group_ids'])
+
+    def _rule_engine_regex_match_and_many_groups(self, assertion):
+        """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
+
+        A helper function injecting assertion passed as an argument.
+        Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_LARGE
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        values = rp.process(assertion)
+
+        user_name = assertion.get('UserName')
+        group_ids = values.get('group_ids')
+        name = values.get('user', {}).get('name')
+
+        self.assertValidMappedUserObject(values)
+        self.assertEqual(user_name, name)
+        self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
+        self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
+
+    def test_rule_engine_regex_match_and_many_groups(self):
+        """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
+
+        The TESTER_ASSERTION should successfully have a match in
+        MAPPING_LARGE. This will test a successful regex match
+        for an `any_one_of` evaluation type, and will have many
+        groups returned.
+
+        """
+        self._rule_engine_regex_match_and_many_groups(
+            mapping_fixtures.TESTER_ASSERTION)
+
+    def test_rule_engine_discards_nonstring_objects(self):
+        """Check whether RuleProcessor discards non string objects.
+
+        Despite the fact that assertion is malformed and contains
+        non string objects, RuleProcessor should correctly discard them and
+        successfully have a match in MAPPING_LARGE.
+
+        """
+        self._rule_engine_regex_match_and_many_groups(
+            mapping_fixtures.MALFORMED_TESTER_ASSERTION)
+
+    def test_rule_engine_fails_after_discarding_nonstring(self):
+        """Check whether RuleProcessor discards non string objects.
+
+        Expect RuleProcessor to discard non string object, which
+        is required for a correct rule match. RuleProcessor will result with
+        empty list of groups.
+
+        """
+        mapping = mapping_fixtures.MAPPING_SMALL
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
+        mapped_properties = rp.process(assertion)
+        self.assertValidMappedUserObject(mapped_properties)
+        self.assertIsNone(mapped_properties['user'].get('name'))
+        self.assertListEqual(list(), mapped_properties['group_ids'])
+
+    def test_rule_engine_returns_group_names(self):
+        """Check whether RuleProcessor returns group names with their domains.
+
+        RuleProcessor should return 'group_names' entry with a list of
+        dictionaries with two entries 'name' and 'domain' identifying group by
+        its name and domain.
+
+        """
+        mapping = mapping_fixtures.MAPPING_GROUP_NAMES
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(mapped_properties)
+        reference = {
+            mapping_fixtures.DEVELOPER_GROUP_NAME:
+            {
+                "name": mapping_fixtures.DEVELOPER_GROUP_NAME,
+                "domain": {
+                    "name": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_NAME
+                }
+            },
+            mapping_fixtures.TESTER_GROUP_NAME:
+            {
+                "name": mapping_fixtures.TESTER_GROUP_NAME,
+                "domain": {
+                    "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+                }
+            }
+        }
+        for rule in mapped_properties['group_names']:
+            self.assertDictEqual(reference.get(rule.get('name')), rule)
+
+    def test_rule_engine_whitelist_and_direct_groups_mapping(self):
+        """Should return user's groups Developer and Contractor.
+
+        The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
+        in MAPPING_GROUPS_WHITELIST. It will test the case where 'whitelist'
+        correctly filters out Manager and only allows Developer and Contractor.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+
+        reference = {
+            mapping_fixtures.DEVELOPER_GROUP_NAME:
+            {
+                "name": mapping_fixtures.DEVELOPER_GROUP_NAME,
+                "domain": {
+                    "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+                }
+            },
+            mapping_fixtures.CONTRACTOR_GROUP_NAME:
+            {
+                "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
+                "domain": {
+                    "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+                }
+            }
+        }
+        for rule in mapped_properties['group_names']:
+            self.assertDictEqual(reference.get(rule.get('name')), rule)
+
+        self.assertEqual('tbo', mapped_properties['user']['name'])
+        self.assertEqual([], mapped_properties['group_ids'])
+
+    def test_rule_engine_blacklist_and_direct_groups_mapping(self):
+        """Should return user's group Developer.
+
+        The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match
+        in MAPPING_GROUPS_BLACKLIST. It will test the case where 'blacklist'
+        correctly filters out Manager and Developer and only allows Contractor.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+
+        reference = {
+            mapping_fixtures.CONTRACTOR_GROUP_NAME:
+            {
+                "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
+                "domain": {
+                    "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+                }
+            }
+        }
+        for rule in mapped_properties['group_names']:
+            self.assertDictEqual(reference.get(rule.get('name')), rule)
+        self.assertEqual('tbo', mapped_properties['user']['name'])
+        self.assertEqual([], mapped_properties['group_ids'])
+
+    def test_rule_engine_blacklist_and_direct_groups_mapping_multiples(self):
+        """Tests matching multiple values before the blacklist.
+
+        Verifies that the local indexes are correct when matching multiple
+        remote values for a field when the field occurs before the blacklist
+        entry in the remote rules.
+
+        """
+
+        mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+
+        reference = {
+            mapping_fixtures.CONTRACTOR_GROUP_NAME:
+            {
+                "name": mapping_fixtures.CONTRACTOR_GROUP_NAME,
+                "domain": {
+                    "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID
+                }
+            }
+        }
+        for rule in mapped_properties['group_names']:
+            self.assertDictEqual(reference.get(rule.get('name')), rule)
+        self.assertEqual('tbo', mapped_properties['user']['name'])
+        self.assertEqual([], mapped_properties['group_ids'])
+
+    def test_rule_engine_whitelist_direct_group_mapping_missing_domain(self):
+        """Test if the local rule is rejected upon missing domain value
+
+        This is a variation with a ``whitelist`` filter.
+
+        """
+        mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        self.assertRaises(exception.ValidationError, rp.process, assertion)
+
+    def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self):
+        """Test if the local rule is rejected upon missing domain value
+
+        This is a variation with a ``blacklist`` filter.
+
+        """
+        mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        self.assertRaises(exception.ValidationError, rp.process, assertion)
+
+    def test_rule_engine_no_groups_allowed(self):
+        """Should return user mapped to no groups.
+
+        The EMPLOYEE_ASSERTION should successfully have a match
+        in MAPPING_GROUPS_WHITELIST, but 'whitelist' should filter out
+        the group values from the assertion and thus map to no groups.
+
+        """
+        mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+        self.assertListEqual(mapped_properties['group_names'], [])
+        self.assertListEqual(mapped_properties['group_ids'], [])
+        self.assertEqual('tbo', mapped_properties['user']['name'])
+
+    def test_mapping_federated_domain_specified(self):
+        """Test mapping engine when domain 'ephemeral' is explicitely set.
+
+        For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion
+        EMPLOYEE_ASSERTION
+
+        """
+        mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(mapped_properties)
+
+    def test_create_user_object_with_bad_mapping(self):
+        """Test if user object is created even with bad mapping.
+
+        User objects will be created by mapping engine always as long as there
+        is corresponding local rule.  This test shows, that even with assertion
+        where no group names nor ids are matched, but there is 'blind' rule for
+        mapping user, such object will be created.
+
+        In this test MAPPING_EHPEMERAL_USER expects UserName set to jsmith
+        whereas value from assertion is 'tbo'.
+
+        """
+        mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(mapped_properties)
+
+        self.assertNotIn('id', mapped_properties['user'])
+        self.assertNotIn('name', mapped_properties['user'])
+
+    def test_set_ephemeral_domain_to_ephemeral_users(self):
+        """Test auto assigning service domain to ephemeral users.
+
+        Test that ephemeral users will always become members of federated
+        service domain. The check depends on ``type`` value which must be set
+        to ``ephemeral`` in case of ephemeral user.
+
+        """
+        mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(mapped_properties)
+
+    def test_local_user_local_domain(self):
+        """Test that local users can have non-service domains assigned."""
+        mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(
+            mapped_properties, user_type='local',
+            domain_id=mapping_fixtures.LOCAL_DOMAIN)
+
+    def test_user_identifications_name(self):
+        """Test varius mapping options and how users are identified.
+
+        This test calls mapped.setup_username() for propagating user object.
+
+        Test plan:
+        - Check if the user has proper domain ('federated') set
+        - Check if the user has property type set ('ephemeral')
+        - Check if user's name is properly mapped from the assertion
+        - Check if user's id is properly set and equal to name, as it was not
+        explicitely specified in the mapping.
+
+        """
+        mapping = mapping_fixtures.MAPPING_USER_IDS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(mapped_properties)
+        mapped.setup_username({}, mapped_properties)
+        self.assertEqual('jsmith', mapped_properties['user']['id'])
+        self.assertEqual('jsmith', mapped_properties['user']['name'])
+
+    def test_user_identifications_name_and_federated_domain(self):
+        """Test varius mapping options and how users are identified.
+
+        This test calls mapped.setup_username() for propagating user object.
+
+        Test plan:
+        - Check if the user has proper domain ('federated') set
+        - Check if the user has propert type set ('ephemeral')
+        - Check if user's name is properly mapped from the assertion
+        - Check if user's id is properly set and equal to name, as it was not
+        explicitely specified in the mapping.
+
+        """
+        mapping = mapping_fixtures.MAPPING_USER_IDS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+        mapped_properties = rp.process(assertion)
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(mapped_properties)
+        mapped.setup_username({}, mapped_properties)
+        self.assertEqual('tbo', mapped_properties['user']['name'])
+        self.assertEqual('tbo', mapped_properties['user']['id'])
+
+    def test_user_identification_id(self):
+        """Test varius mapping options and how users are identified.
+
+        This test calls mapped.setup_username() for propagating user object.
+
+        Test plan:
+        - Check if the user has proper domain ('federated') set
+        - Check if the user has propert type set ('ephemeral')
+        - Check if user's id is properly mapped from the assertion
+        - Check if user's name is properly set and equal to id, as it was not
+        explicitely specified in the mapping.
+
+        """
+        mapping = mapping_fixtures.MAPPING_USER_IDS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.ADMIN_ASSERTION
+        mapped_properties = rp.process(assertion)
+        context = {'environment': {}}
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(mapped_properties)
+        mapped.setup_username(context, mapped_properties)
+        self.assertEqual('bob', mapped_properties['user']['name'])
+        self.assertEqual('bob', mapped_properties['user']['id'])
+
+    def test_user_identification_id_and_name(self):
+        """Test varius mapping options and how users are identified.
+
+        This test calls mapped.setup_username() for propagating user object.
+
+        Test plan:
+        - Check if the user has proper domain ('federated') set
+        - Check if the user has proper type set ('ephemeral')
+        - Check if user's name is properly mapped from the assertion
+        - Check if user's id is properly set and and equal to value hardcoded
+        in the mapping
+
+        """
+        mapping = mapping_fixtures.MAPPING_USER_IDS
+        rp = mapping_utils.RuleProcessor(mapping['rules'])
+        assertion = mapping_fixtures.CUSTOMER_ASSERTION
+        mapped_properties = rp.process(assertion)
+        context = {'environment': {}}
+        self.assertIsNotNone(mapped_properties)
+        self.assertValidMappedUserObject(mapped_properties)
+        mapped.setup_username(context, mapped_properties)
+        self.assertEqual('bwilliams', mapped_properties['user']['name'])
+        self.assertEqual('abc123', mapped_properties['user']['id'])
+
+
+class FederatedTokenTests(FederationTests, FederatedSetupMixin):
+
+    def auth_plugin_config_override(self):
+        methods = ['saml2']
+        method_classes = {'saml2': 'keystone.auth.plugins.saml2.Saml2'}
+        super(FederatedTokenTests, self).auth_plugin_config_override(
+            methods, **method_classes)
+
+    def setUp(self):
+        super(FederatedTokenTests, self).setUp()
+        self._notifications = []
+
+        def fake_saml_notify(action, context, user_id, group_ids,
+                             identity_provider, protocol, token_id, outcome):
+            note = {
+                'action': action,
+                'user_id': user_id,
+                'identity_provider': identity_provider,
+                'protocol': protocol,
+                'send_notification_called': True}
+            self._notifications.append(note)
+
+        self.useFixture(mockpatch.PatchObject(
+            notifications,
+            'send_saml_audit_notification',
+            fake_saml_notify))
+
+    def _assert_last_notify(self, action, identity_provider, protocol,
+                            user_id=None):
+        self.assertTrue(self._notifications)
+        note = self._notifications[-1]
+        if user_id:
+            self.assertEqual(note['user_id'], user_id)
+        self.assertEqual(note['action'], action)
+        self.assertEqual(note['identity_provider'], identity_provider)
+        self.assertEqual(note['protocol'], protocol)
+        self.assertTrue(note['send_notification_called'])
+
+    def load_fixtures(self, fixtures):
+        super(FederationTests, self).load_fixtures(fixtures)
+        self.load_federation_sample_data()
+
+    def test_issue_unscoped_token_notify(self):
+        self._issue_unscoped_token()
+        self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL)
+
+    def test_issue_unscoped_token(self):
+        r = self._issue_unscoped_token()
+        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+
+    def test_issue_unscoped_token_disabled_idp(self):
+        """Checks if authentication works with disabled identity providers.
+
+        Test plan:
+        1) Disable default IdP
+        2) Try issuing unscoped token for that IdP
+        3) Expect server to forbid authentication
+
+        """
+        enabled_false = {'enabled': False}
+        self.federation_api.update_idp(self.IDP, enabled_false)
+        self.assertRaises(exception.Forbidden,
+                          self._issue_unscoped_token)
+
+    def test_issue_unscoped_token_group_names_in_mapping(self):
+        r = self._issue_unscoped_token(assertion='ANOTHER_CUSTOMER_ASSERTION')
+        ref_groups = set([self.group_customers['id'], self.group_admins['id']])
+        token_resp = r.json_body
+        token_groups = token_resp['token']['user']['OS-FEDERATION']['groups']
+        token_groups = set([group['id'] for group in token_groups])
+        self.assertEqual(ref_groups, token_groups)
+
+    def test_issue_unscoped_tokens_nonexisting_group(self):
+        self.assertRaises(exception.MissingGroups,
+                          self._issue_unscoped_token,
+                          assertion='ANOTHER_TESTER_ASSERTION')
+
+    def test_issue_unscoped_token_with_remote_no_attribute(self):
+        r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
+                                       environment={
+                                           self.REMOTE_ID_ATTR: self.REMOTE_ID
+                                       })
+        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+
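+    # The next three tests exercise remote_id checking: the value found in
+    # the request environment under `remote_id_attribute` must match the
+    # IdP's registered remote_id, otherwise authentication fails.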
+    def test_issue_unscoped_token_with_remote(self):
+        self.config_fixture.config(group='federation',
+                                   remote_id_attribute=self.REMOTE_ID_ATTR)
+        r = self._issue_unscoped_token(idp=self.IDP_WITH_REMOTE,
+                                       environment={
+                                           self.REMOTE_ID_ATTR: self.REMOTE_ID
+                                       })
+        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+
+    def test_issue_unscoped_token_with_remote_different(self):
+        self.config_fixture.config(group='federation',
+                                   remote_id_attribute=self.REMOTE_ID_ATTR)
+        self.assertRaises(exception.Forbidden,
+                          self._issue_unscoped_token,
+                          idp=self.IDP_WITH_REMOTE,
+                          environment={
+                              self.REMOTE_ID_ATTR: uuid.uuid4().hex
+                          })
+
+    def test_issue_unscoped_token_with_remote_unavailable(self):
+        self.config_fixture.config(group='federation',
+                                   remote_id_attribute=self.REMOTE_ID_ATTR)
+        self.assertRaises(exception.ValidationError,
+                          self._issue_unscoped_token,
+                          idp=self.IDP_WITH_REMOTE,
+                          environment={
+                              uuid.uuid4().hex: uuid.uuid4().hex
+                          })
+
+    def test_issue_unscoped_token_with_remote_user_as_empty_string(self):
+        # make sure that REMOTE_USER set to the empty string won't interfere
+        r = self._issue_unscoped_token(environment={'REMOTE_USER': ''})
+        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+
+    def test_issue_unscoped_token_no_groups(self):
+        self.assertRaises(exception.Unauthorized,
+                          self._issue_unscoped_token,
+                          assertion='BAD_TESTER_ASSERTION')
+
+    def test_issue_unscoped_token_malformed_environment(self):
+        """Test whether non string objects are filtered out.
+
+        Put non string objects into the environment, inject
+        correct assertion and try to get an unscoped token.
+        Expect server not to fail on using split() method on
+        non string objects and return token id in the HTTP header.
+
+        """
+        api = auth_controllers.Auth()
+        context = {
+            'environment': {
+                'malformed_object': object(),
+                'another_bad_idea': tuple(xrange(10)),
+                'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
+                                                  range(32)))
+            }
+        }
+        self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
+        r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
+        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+
+    def test_scope_to_project_once_notify(self):
+        r = self.v3_authenticate_token(
+            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
+        user_id = r.json['token']['user']['id']
+        self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL, user_id)
+
+    def test_scope_to_project_once(self):
+        r = self.v3_authenticate_token(
+            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
+        token_resp = r.result['token']
+        project_id = token_resp['project']['id']
+        self.assertEqual(project_id, self.proj_employees['id'])
+        self._check_scoped_token_attributes(token_resp)
+        roles_ref = [self.role_employee]
+        projects_ref = self.proj_employees
+        self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
+
+    def test_scope_token_with_idp_disabled(self):
+        """Scope token issued by disabled IdP.
+
+        Try scoping a token issued by an IdP that has since been disabled.
+        Expect the server to refuse the scoping operation.
+
+        This test confirms correct behaviour for the case where the IdP was
+        enabled when the unscoped token was issued, but was disabled before
+        the user tried to scope that token. We assume the unscoped token has
+        already been issued and start from the moment the IdP is disabled
+        and the unscoped token is being used.
+
+        Test plan:
+        1) Disable IdP
+        2) Try scoping unscoped token
+
+        """
+        enabled_false = {'enabled': False}
+        self.federation_api.update_idp(self.IDP, enabled_false)
+        self.v3_authenticate_token(
+            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
+            expected_status=403)
+
+    def test_scope_to_bad_project(self):
+        """Scope unscoped token with a project we don't have access to."""
+
+        self.v3_authenticate_token(
+            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
+            expected_status=401)
+
+    def test_scope_to_project_multiple_times(self):
+        """Try to scope the unscoped token multiple times.
+
+        The new tokens should be scoped to:
+
+        * Customers' project
+        * Employees' project
+
+        """
+
+        bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
+                  self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
+        project_ids = (self.proj_employees['id'],
+                       self.proj_customers['id'])
+        for body, project_id_ref in zip(bodies, project_ids):
+            r = self.v3_authenticate_token(body)
+            token_resp = r.result['token']
+            project_id = token_resp['project']['id']
+            self.assertEqual(project_id, project_id_ref)
+            self._check_scoped_token_attributes(token_resp)
+
+    def test_scope_to_project_with_only_inherited_roles(self):
+        """Try to scope token whose only roles are inherited."""
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        r = self.v3_authenticate_token(
+            self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER)
+        token_resp = r.result['token']
+        project_id = token_resp['project']['id']
+        self.assertEqual(project_id, self.project_inherited['id'])
+        self._check_scoped_token_attributes(token_resp)
+        roles_ref = [self.role_customer]
+        projects_ref = self.project_inherited
+        self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
+
+    def test_scope_token_from_nonexistent_unscoped_token(self):
+        """Try to scope token from non-existent unscoped token."""
+        self.v3_authenticate_token(
+            self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
+            expected_status=404)
+
+    def test_issue_token_from_rules_without_user(self):
+        api = auth_controllers.Auth()
+        context = {'environment': {}}
+        self._inject_assertion(context, 'BAD_TESTER_ASSERTION')
+        self.assertRaises(exception.Unauthorized,
+                          api.authenticate_for_token,
+                          context, self.UNSCOPED_V3_SAML2_REQ)
+
+    def test_issue_token_with_nonexistent_group(self):
+        """Inject assertion that matches rule issuing bad group id.
+
+        Expect server to find out that some groups are missing in the
+        backend and raise exception.MappedGroupNotFound exception.
+
+        """
+        self.assertRaises(exception.MappedGroupNotFound,
+                          self._issue_unscoped_token,
+                          assertion='CONTRACTOR_ASSERTION')
+
+    def test_scope_to_domain_once(self):
+        r = self.v3_authenticate_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
+        token_resp = r.result['token']
+        domain_id = token_resp['domain']['id']
+        self.assertEqual(self.domainA['id'], domain_id)
+        self._check_scoped_token_attributes(token_resp)
+
+    def test_scope_to_domain_multiple_tokens(self):
+        """Issue multiple tokens scoping to different domains.
+
+        The new tokens should be scoped to:
+
+        * domainA
+        * domainB
+        * domainC
+
+        """
+        bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
+                  self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
+                  self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
+        domain_ids = (self.domainA['id'],
+                      self.domainB['id'],
+                      self.domainC['id'])
+
+        for body, domain_id_ref in zip(bodies, domain_ids):
+            r = self.v3_authenticate_token(body)
+            token_resp = r.result['token']
+            domain_id = token_resp['domain']['id']
+            self.assertEqual(domain_id_ref, domain_id)
+            self._check_scoped_token_attributes(token_resp)
+
+    def test_scope_to_domain_with_only_inherited_roles_fails(self):
+        """Try to scope to a domain that has no direct roles."""
+        self.v3_authenticate_token(
+            self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER,
+            expected_status=401)
+
+    def test_list_projects(self):
+        urls = ('/OS-FEDERATION/projects', '/auth/projects')
+
+        tokens = (self.tokens['CUSTOMER_ASSERTION'],
+                  self.tokens['EMPLOYEE_ASSERTION'],
+                  self.tokens['ADMIN_ASSERTION'])
+
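+        # Enable os_inherit so that inherited role assignments also grant
+        # access to projects (project_inherited shows up below).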
+        self.config_fixture.config(group='os_inherit', enabled=True)
+        projects_refs = (set([self.proj_customers['id'],
+                              self.project_inherited['id']]),
+                         set([self.proj_employees['id'],
+                              self.project_all['id']]),
+                         set([self.proj_employees['id'],
+                              self.project_all['id'],
+                              self.proj_customers['id'],
+                              self.project_inherited['id']]))
+
+        for token, projects_ref in zip(tokens, projects_refs):
+            for url in urls:
+                r = self.get(url, token=token)
+                projects_resp = r.result['projects']
+                projects = set(p['id'] for p in projects_resp)
+                self.assertEqual(projects_ref, projects,
+                                 'match failed for url %s' % url)
+
+    def test_list_domains(self):
+        urls = ('/OS-FEDERATION/domains', '/auth/domains')
+
+        tokens = (self.tokens['CUSTOMER_ASSERTION'],
+                  self.tokens['EMPLOYEE_ASSERTION'],
+                  self.tokens['ADMIN_ASSERTION'])
+
+        # NOTE(henry-nash): domain D does not appear in the expected results
+        # since it only had inherited roles (which only apply to projects
+        # within the domain)
+
+        domain_refs = (set([self.domainA['id']]),
+                       set([self.domainA['id'],
+                            self.domainB['id']]),
+                       set([self.domainA['id'],
+                            self.domainB['id'],
+                            self.domainC['id']]))
+
+        for token, domains_ref in zip(tokens, domain_refs):
+            for url in urls:
+                r = self.get(url, token=token)
+                domains_resp = r.result['domains']
+                domains = set(p['id'] for p in domains_resp)
+                self.assertEqual(domains_ref, domains,
+                                 'match failed for url %s' % url)
+
+    def test_full_workflow(self):
+        """Test 'standard' workflow for granting access tokens.
+
+        * Issue unscoped token
+        * List available projects based on groups
+        * Scope token to one of available projects
+
+        """
+
+        r = self._issue_unscoped_token()
+        employee_unscoped_token_id = r.headers.get('X-Subject-Token')
+        r = self.get('/OS-FEDERATION/projects',
+                     token=employee_unscoped_token_id)
+        projects = r.result['projects']
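+        # pick one of the available projects at random to scope to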
+        random_project = random.randint(0, len(projects) - 1)
+        project = projects[random_project]
+
+        v3_scope_request = self._scope_request(employee_unscoped_token_id,
+                                               'project', project['id'])
+
+        r = self.v3_authenticate_token(v3_scope_request)
+        token_resp = r.result['token']
+        project_id = token_resp['project']['id']
+        self.assertEqual(project['id'], project_id)
+        self._check_scoped_token_attributes(token_resp)
+
+    def test_workflow_with_groups_deletion(self):
+        """Test full workflow with groups deletion before token scoping.
+
+        The test scenario is as follows:
+         - Create group ``group``
+         - Create and assign roles to ``group`` and ``project_all``
+         - Patch mapping rules for existing IdP so it issues group id
+         - Issue unscoped token with ``group``'s id
+         - Delete group ``group``
+         - Scope token to ``project_all``
+         - Expect HTTP 500 response
+
+        """
+        # create group and role
+        group = self.new_group_ref(
+            domain_id=self.domainA['id'])
+        group = self.identity_api.create_group(group)
+        role = self.new_role_ref()
+        self.role_api.create_role(role['id'], role)
+
+        # assign the role to the group on project_all
+        self.assignment_api.create_grant(role['id'],
+                                         group_id=group['id'],
+                                         project_id=self.project_all['id'])
+
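+        # Mapping: assertions whose LastName is 'Account' map to the new
+        # group and to a user named after the UserName remote attribute.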
+        rules = {
+            'rules': [
+                {
+                    'local': [
+                        {
+                            'group': {
+                                'id': group['id']
+                            }
+                        },
+                        {
+                            'user': {
+                                'name': '{0}'
+                            }
+                        }
+                    ],
+                    'remote': [
+                        {
+                            'type': 'UserName'
+                        },
+                        {
+                            'type': 'LastName',
+                            'any_one_of': [
+                                'Account'
+                            ]
+                        }
+                    ]
+                }
+            ]
+        }
+
+        self.federation_api.update_mapping(self.mapping['id'], rules)
+
+        r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
+        token_id = r.headers.get('X-Subject-Token')
+
+        # delete group
+        self.identity_api.delete_group(group['id'])
+
+        # scope token to project_all, expect HTTP 500
+        scoped_token = self._scope_request(
+            token_id, 'project',
+            self.project_all['id'])
+
+        self.v3_authenticate_token(scoped_token, expected_status=500)
+
+    def test_lists_with_missing_group_in_backend(self):
+        """Test a mapping that points to a group that does not exist
+
+        For explicit mappings, we expect the group to exist in the backend,
+        but for lists, specifically blacklists, a missing group is expected
+        as many groups will be specified by the IdP that are not Keystone
+        groups.
+
+        The test scenario is as follows:
+         - Create group ``EXISTS``
+         - Set mapping rules for the existing IdP with a blacklist
+           that passes REMOTE_USER_GROUPS through
+         - Issue an unscoped token and expect only group ``EXISTS``'s id
+           in it
+
+        """
+        domain_id = self.domainA['id']
+        domain_name = self.domainA['name']
+        group = self.new_group_ref(domain_id=domain_id)
+        group['name'] = 'EXISTS'
+        group = self.identity_api.create_group(group)
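+        # Two rules: the first maps REMOTE_USER onto the user identity,
+        # the second passes every REMOTE_USER_GROUPS value that is not
+        # blacklisted through as a group name in this domain.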
+        rules = {
+            'rules': [
+                {
+                    "local": [
+                        {
+                            "user": {
+                                "name": "{0}",
+                                "id": "{0}"
+                            }
+                        }
+                    ],
+                    "remote": [
+                        {
+                            "type": "REMOTE_USER"
+                        }
+                    ]
+                },
+                {
+                    "local": [
+                        {
+                            "groups": "{0}",
+                            "domain": {"name": domain_name}
+                        }
+                    ],
+                    "remote": [
+                        {
+                            "type": "REMOTE_USER_GROUPS",
+                            "blacklist": ["noblacklist"]
+                        }
+                    ]
+                }
+            ]
+        }
+        self.federation_api.update_mapping(self.mapping['id'], rules)
+
+        r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION')
+        assigned_group_ids = r.json['token']['user']['OS-FEDERATION']['groups']
+        self.assertEqual(1, len(assigned_group_ids))
+        self.assertEqual(group['id'], assigned_group_ids[0]['id'])
+
+    def test_assertion_prefix_parameter(self):
+        """Test parameters filtering based on the prefix.
+
+        With ``assertion_prefix`` set to a fixed, non-default value,
+        issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
+        Expect the server to return an unscoped token.
+
+        """
+        self.config_fixture.config(group='federation',
+                                   assertion_prefix=self.ASSERTION_PREFIX)
+        r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
+        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+
+    def test_assertion_prefix_parameter_expect_fail(self):
+        """Test parameters filtering based on the prefix.
+
+        With ``assertion_prefix`` left at its default empty-string value,
+        issue an unscoped token from assertion EMPLOYEE_ASSERTION.
+        Next, configure ``assertion_prefix`` to the value ``UserName``.
+        Try issuing an unscoped token with EMPLOYEE_ASSERTION again.
+        Expect the server to raise exception.Unauthorized.
+
+        """
+        r = self._issue_unscoped_token()
+        self.assertIsNotNone(r.headers.get('X-Subject-Token'))
+        self.config_fixture.config(group='federation',
+                                   assertion_prefix='UserName')
+
+        self.assertRaises(exception.Unauthorized,
+                          self._issue_unscoped_token)
+
+    def test_v2_auth_with_federation_token_fails(self):
+        """Test that using a federation token with v2 auth fails.
+
+        If an admin sets up a federated Keystone environment, and a user
+        incorrectly configures a service (like Nova) to only use v2 auth, the
+        returned message should be informative.
+
+        """
+        r = self._issue_unscoped_token()
+        token_id = r.headers.get('X-Subject-Token')
+        self.assertRaises(exception.Unauthorized,
+                          self.token_provider_api.validate_v2_token,
+                          token_id=token_id)
+
+    def test_unscoped_token_has_user_domain(self):
+        r = self._issue_unscoped_token()
+        self._check_domains_are_valid(r.json_body['token'])
+
+    def test_scoped_token_has_user_domain(self):
+        r = self.v3_authenticate_token(
+            self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
+        self._check_domains_are_valid(r.result['token'])
+
+    def test_issue_unscoped_token_for_local_user(self):
+        r = self._issue_unscoped_token(assertion='LOCAL_USER_ASSERTION')
+        token_resp = r.json_body['token']
+        self.assertListEqual(['saml2'], token_resp['methods'])
+        self.assertEqual(self.user['id'], token_resp['user']['id'])
+        self.assertEqual(self.user['name'], token_resp['user']['name'])
+        self.assertEqual(self.domain['id'], token_resp['user']['domain']['id'])
+        # Make sure the token is not scoped
+        self.assertNotIn('project', token_resp)
+        self.assertNotIn('domain', token_resp)
+
+    def test_issue_token_for_local_user_user_not_found(self):
+        self.assertRaises(exception.Unauthorized,
+                          self._issue_unscoped_token,
+                          assertion='ANOTHER_LOCAL_USER_ASSERTION')
+
+
+class FernetFederatedTokenTests(FederationTests, FederatedSetupMixin):
+    AUTH_METHOD = 'token'
+
+    def load_fixtures(self, fixtures):
+        super(FernetFederatedTokenTests, self).load_fixtures(fixtures)
+        self.load_federation_sample_data()
+
+    def auth_plugin_config_override(self):
+        methods = ['saml2', 'token', 'password']
+        method_classes = dict(
+            password='keystone.auth.plugins.password.Password',
+            token='keystone.auth.plugins.token.Token',
+            saml2='keystone.auth.plugins.saml2.Saml2')
+        super(FernetFederatedTokenTests,
+              self).auth_plugin_config_override(methods, **method_classes)
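+        # Fernet tokens require a key repository on disk for signing and
+        # encryption, so switch the provider and install one.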
+        self.config_fixture.config(
+            group='token',
+            provider='keystone.token.providers.fernet.Provider')
+        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
+    def test_federated_unscoped_token(self):
+        resp = self._issue_unscoped_token()
+        self.assertEqual(186, len(resp.headers['X-Subject-Token']))
+
+    def test_federated_unscoped_token_with_multiple_groups(self):
+        assertion = 'ANOTHER_CUSTOMER_ASSERTION'
+        resp = self._issue_unscoped_token(assertion=assertion)
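+        # with two mapped groups the Fernet payload, and hence the token,
+        # is longer than the single-group token above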
+        self.assertEqual(204, len(resp.headers['X-Subject-Token']))
+
+    def test_validate_federated_unscoped_token(self):
+        resp = self._issue_unscoped_token()
+        unscoped_token = resp.headers.get('X-Subject-Token')
+        # assert that the token we received is valid
+        self.get('/auth/tokens/', headers={'X-Subject-Token': unscoped_token})
+
+    def test_fernet_full_workflow(self):
+        """Test 'standard' workflow for granting Fernet access tokens.
+
+        * Issue unscoped token
+        * List available projects based on groups
+        * Scope token to one of available projects
+
+        """
+        resp = self._issue_unscoped_token()
+        unscoped_token = resp.headers.get('X-Subject-Token')
+        resp = self.get('/OS-FEDERATION/projects',
+                        token=unscoped_token)
+        projects = resp.result['projects']
+        random_project = random.randint(0, len(projects) - 1)
+        project = projects[random_project]
+
+        v3_scope_request = self._scope_request(unscoped_token,
+                                               'project', project['id'])
+
+        resp = self.v3_authenticate_token(v3_scope_request)
+        token_resp = resp.result['token']
+        project_id = token_resp['project']['id']
+        self.assertEqual(project['id'], project_id)
+        self._check_scoped_token_attributes(token_resp)
+
+
+class FederatedTokenTestsMethodToken(FederatedTokenTests):
+    """Test federation operation with unified scoping auth method.
+
+    Test all the operations with the auth method set to ``token``, the new,
+    unified way of scoping tokens.
+
+    """
+    AUTH_METHOD = 'token'
+
+    def auth_plugin_config_override(self):
+        methods = ['saml2', 'token']
+        method_classes = dict(
+            token='keystone.auth.plugins.token.Token',
+            saml2='keystone.auth.plugins.saml2.Saml2')
+        super(FederatedTokenTests,
+              self).auth_plugin_config_override(methods, **method_classes)
+
+
+class JsonHomeTests(FederationTests, test_v3.JsonHomeTestMixin):
+    JSON_HOME_DATA = {
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/'
+        '1.0/rel/identity_provider': {
+            'href-template': '/OS-FEDERATION/identity_providers/{idp_id}',
+            'href-vars': {
+                'idp_id': 'http://docs.openstack.org/api/openstack-identity/3/'
+                'ext/OS-FEDERATION/1.0/param/idp_id'
+            },
+        },
+    }
+
+
+def _is_xmlsec1_installed():
+    p = subprocess.Popen(
+        ['which', 'xmlsec1'],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE)
+
+    # 'which' exits 0 when xmlsec1 is found; invert so True means installed
+    return not bool(p.wait())
+
+
+def _load_xml(filename):
+    with open(os.path.join(XMLDIR, filename), 'r') as xml:
+        return xml.read()
+
+
+class SAMLGenerationTests(FederationTests):
+
+    SP_AUTH_URL = ('http://beta.com:5000/v3/OS-FEDERATION/identity_providers'
+                   '/BETA/protocols/saml2/auth')
+    ISSUER = 'https://acme.com/FIM/sps/openstack/saml20'
+    RECIPIENT = 'http://beta.com/Shibboleth.sso/SAML2/POST'
+    SUBJECT = 'test_user'
+    ROLES = ['admin', 'member']
+    PROJECT = 'development'
+    SAML_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2'
+    ASSERTION_VERSION = "2.0"
+    SERVICE_PROVIDER_ID = 'ACME'
+
+    def sp_ref(self):
+        ref = {
+            'auth_url': self.SP_AUTH_URL,
+            'enabled': True,
+            'description': uuid.uuid4().hex,
+            'sp_url': self.RECIPIENT,
+        }
+        return ref
+
+    def setUp(self):
+        super(SAMLGenerationTests, self).setUp()
+        self.signed_assertion = saml2.create_class_from_xml_string(
+            saml.Assertion, _load_xml('signed_saml2_assertion.xml'))
+        self.sp = self.sp_ref()
+        self.federation_api.create_sp(self.SERVICE_PROVIDER_ID, self.sp)
+
+    def test_samlize_token_values(self):
+        """Test the SAML generator produces a SAML object.
+
+        Test the SAML generator directly by passing known arguments, the result
+        should be a SAML object that consistently includes attributes based on
+        the known arguments that were passed in.
+
+        """
+        with mock.patch.object(keystone_idp, '_sign_assertion',
+                               return_value=self.signed_assertion):
+            generator = keystone_idp.SAMLGenerator()
+            response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
+                                               self.SUBJECT, self.ROLES,
+                                               self.PROJECT)
+
+        assertion = response.assertion
+        self.assertIsNotNone(assertion)
+        self.assertIsInstance(assertion, saml.Assertion)
+        issuer = response.issuer
+        self.assertEqual(self.RECIPIENT, response.destination)
+        self.assertEqual(self.ISSUER, issuer.text)
+
+        user_attribute = assertion.attribute_statement[0].attribute[0]
+        self.assertEqual(self.SUBJECT, user_attribute.attribute_value[0].text)
+
+        role_attribute = assertion.attribute_statement[0].attribute[1]
+        for attribute_value in role_attribute.attribute_value:
+            self.assertIn(attribute_value.text, self.ROLES)
+
+        project_attribute = assertion.attribute_statement[0].attribute[2]
+        self.assertEqual(self.PROJECT,
+                         project_attribute.attribute_value[0].text)
+
+    def test_verify_assertion_object(self):
+        """Test that the Assertion object is built properly.
+
+        The Assertion doesn't need to be signed in this test, so
+        _sign_assertion method is patched and doesn't alter the assertion.
+
+        """
+        with mock.patch.object(keystone_idp, '_sign_assertion',
+                               side_effect=lambda x: x):
+            generator = keystone_idp.SAMLGenerator()
+            response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
+                                               self.SUBJECT, self.ROLES,
+                                               self.PROJECT)
+        assertion = response.assertion
+        self.assertEqual(self.ASSERTION_VERSION, assertion.version)
+
+    def test_valid_saml_xml(self):
+        """Test the generated SAML object can become valid XML.
+
+        Test the generator directly by passing known arguments; the result
+        should be a SAML object that consistently includes attributes based
+        on the known arguments that were passed in.
+
+        """
+        with mock.patch.object(keystone_idp, '_sign_assertion',
+                               return_value=self.signed_assertion):
+            generator = keystone_idp.SAMLGenerator()
+            response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
+                                               self.SUBJECT, self.ROLES,
+                                               self.PROJECT)
+
+        saml_str = response.to_string()
+        response = etree.fromstring(saml_str)
+        issuer = response[0]
+        assertion = response[2]
+
+        self.assertEqual(self.RECIPIENT, response.get('Destination'))
+        self.assertEqual(self.ISSUER, issuer.text)
+
+        user_attribute = assertion[4][0]
+        self.assertEqual(self.SUBJECT, user_attribute[0].text)
+
+        role_attribute = assertion[4][1]
+        for attribute_value in role_attribute:
+            self.assertIn(attribute_value.text, self.ROLES)
+
+        project_attribute = assertion[4][2]
+        self.assertEqual(self.PROJECT, project_attribute[0].text)
+
+    def test_assertion_using_explicit_namespace_prefixes(self):
+        def mocked_subprocess_check_output(*popenargs, **kwargs):
+            # the last option is the assertion file to be signed
+            filename = popenargs[0][-1]
+            with open(filename, 'r') as f:
+                assertion_content = f.read()
+            # since we are not testing the signature itself, we can return
+            # the assertion as is without signing it
+            return assertion_content
+
+        with mock.patch('subprocess.check_output',
+                        side_effect=mocked_subprocess_check_output):
+            generator = keystone_idp.SAMLGenerator()
+            response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
+                                               self.SUBJECT, self.ROLES,
+                                               self.PROJECT)
+            assertion_xml = response.assertion.to_string()
+            # make sure we have the proper tag and prefix for the assertion
+            # namespace
+            self.assertIn('<saml:Assertion', assertion_xml)
+            self.assertIn('xmlns:saml="' + saml2.NAMESPACE + '"',
+                          assertion_xml)
+            self.assertIn('xmlns:xmldsig="' + xmldsig.NAMESPACE + '"',
+                          assertion_xml)
+
+    def test_saml_signing(self):
+        """Test that the SAML generator produces a SAML object.
+
+        Test the SAML generator directly by passing known arguments, the result
+        should be a SAML object that consistently includes attributes based on
+        the known arguments that were passed in.
+
+        """
+        if not _is_xmlsec1_installed():
+            self.skip('xmlsec1 is not installed')
+
+        generator = keystone_idp.SAMLGenerator()
+        response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
+                                           self.SUBJECT, self.ROLES,
+                                           self.PROJECT)
+
+        signature = response.assertion.signature
+        self.assertIsNotNone(signature)
+        self.assertIsInstance(signature, xmldsig.Signature)
+
+        idp_public_key = sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
+        cert_text = signature.key_info.x509_data[0].x509_certificate.text
+        # NOTE(stevemar): Rather than one line of text, the certificate is
+        # printed with newlines for readability, we remove these so we can
+        # match it with the key that we used.
+        cert_text = cert_text.replace(os.linesep, '')
+        self.assertEqual(idp_public_key, cert_text)
+
+    def _create_generate_saml_request(self, token_id, sp_id):
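+        # Build the v3 auth request body accepted by the SAML generation
+        # endpoint: token-based identity scoped to a service provider.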
+        return {
+            "auth": {
+                "identity": {
+                    "methods": [
+                        "token"
+                    ],
+                    "token": {
+                        "id": token_id
+                    }
+                },
+                "scope": {
+                    "service_provider": {
+                        "id": sp_id
+                    }
+                }
+            }
+        }
+
+    def _fetch_valid_token(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        resp = self.v3_authenticate_token(auth_data)
+        token_id = resp.headers.get('X-Subject-Token')
+        return token_id
+
+    def _fetch_domain_scoped_token(self):
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            user_domain_id=self.domain['id'])
+        resp = self.v3_authenticate_token(auth_data)
+        token_id = resp.headers.get('X-Subject-Token')
+        return token_id
+
+    def test_not_project_scoped_token(self):
+        """Ensure SAML generation fails when passing domain-scoped tokens.
+
+        The server should return a 403 Forbidden Action.
+
+        """
+        self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
+        token_id = self._fetch_domain_scoped_token()
+        body = self._create_generate_saml_request(token_id,
+                                                  self.SERVICE_PROVIDER_ID)
+        with mock.patch.object(keystone_idp, '_sign_assertion',
+                               return_value=self.signed_assertion):
+            self.post(self.SAML_GENERATION_ROUTE, body=body,
+                      expected_status=403)
+
+    def test_generate_saml_route(self):
+        """Test that the SAML generation endpoint produces XML.
+
+        The SAML endpoint /v3/auth/OS-FEDERATION/saml2 should take as input
+        a scoped token ID and a Service Provider ID.
+        The controller should fetch details about the user from the token,
+        and details about the service provider from its ID.
+        This should be enough information to invoke the SAML generator and
+        provide a valid SAML (XML) document back.
+
+        """
+        self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
+        token_id = self._fetch_valid_token()
+        body = self._create_generate_saml_request(token_id,
+                                                  self.SERVICE_PROVIDER_ID)
+
+        with mock.patch.object(keystone_idp, '_sign_assertion',
+                               return_value=self.signed_assertion):
+            http_response = self.post(self.SAML_GENERATION_ROUTE, body=body,
+                                      response_content_type='text/xml',
+                                      expected_status=200)
+
+        response = etree.fromstring(http_response.result)
+        issuer = response[0]
+        assertion = response[2]
+
+        self.assertEqual(self.RECIPIENT, response.get('Destination'))
+        self.assertEqual(self.ISSUER, issuer.text)
+
+        # NOTE(stevemar): We should test this against expected values,
+        # but the self.xyz attribute names are uuids, and we mock out
+        # the result. Ideally we should update the mocked result with
+        # some known data, and create the roles/project/user before
+        # these tests run.
+        user_attribute = assertion[4][0]
+        self.assertIsInstance(user_attribute[0].text, str)
+
+        role_attribute = assertion[4][1]
+        self.assertIsInstance(role_attribute[0].text, str)
+
+        project_attribute = assertion[4][2]
+        self.assertIsInstance(project_attribute[0].text, str)
+
+    def test_invalid_scope_body(self):
+        """Test that missing the scope in request body raises an exception.
+
+        Raises exception.SchemaValidationError() - error code 400
+
+        """
+
+        token_id = uuid.uuid4().hex
+        body = self._create_generate_saml_request(token_id,
+                                                  self.SERVICE_PROVIDER_ID)
+        del body['auth']['scope']
+
+        self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=400)
+
+    def test_invalid_token_body(self):
+        """Test that missing the token in request body raises an exception.
+
+        Raises exception.SchemaValidationError() - error code 400
+
+        """
+
+        token_id = uuid.uuid4().hex
+        body = self._create_generate_saml_request(token_id,
+                                                  self.SERVICE_PROVIDER_ID)
+        del body['auth']['identity']['token']
+
+        self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=400)
+
+    def test_sp_not_found(self):
+        """Test SAML generation with an invalid service provider ID.
+
+        Raises exception.ServiceProviderNotFound() - error code 404
+
+        """
+        sp_id = uuid.uuid4().hex
+        token_id = self._fetch_valid_token()
+        body = self._create_generate_saml_request(token_id, sp_id)
+        self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=404)
+
+    def test_sp_disabled(self):
+        """Try generating assertion for disabled Service Provider."""
+
+        # Disable Service Provider
+        sp_ref = {'enabled': False}
+        self.federation_api.update_sp(self.SERVICE_PROVIDER_ID, sp_ref)
+
+        token_id = self._fetch_valid_token()
+        body = self._create_generate_saml_request(token_id,
+                                                  self.SERVICE_PROVIDER_ID)
+        self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=403)
+
+    def test_token_not_found(self):
+        """Test that an invalid token in the request body raises an exception.
+
+        Raises exception.TokenNotFound() - error code 404
+
+        """
+
+        token_id = uuid.uuid4().hex
+        body = self._create_generate_saml_request(token_id,
+                                                  self.SERVICE_PROVIDER_ID)
+        self.post(self.SAML_GENERATION_ROUTE, body=body, expected_status=404)
+
+
+class IdPMetadataGenerationTests(FederationTests):
+    """A class for testing Identity Provider Metadata generation."""
+
+    METADATA_URL = '/OS-FEDERATION/saml2/metadata'
+
+    def setUp(self):
+        super(IdPMetadataGenerationTests, self).setUp()
+        self.generator = keystone_idp.MetadataGenerator()
+
+    def config_overrides(self):
+        super(IdPMetadataGenerationTests, self).config_overrides()
+        self.config_fixture.config(
+            group='saml',
+            idp_entity_id=federation_fixtures.IDP_ENTITY_ID,
+            idp_sso_endpoint=federation_fixtures.IDP_SSO_ENDPOINT,
+            idp_organization_name=federation_fixtures.IDP_ORGANIZATION_NAME,
+            idp_organization_display_name=(
+                federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME),
+            idp_organization_url=federation_fixtures.IDP_ORGANIZATION_URL,
+            idp_contact_company=federation_fixtures.IDP_CONTACT_COMPANY,
+            idp_contact_name=federation_fixtures.IDP_CONTACT_GIVEN_NAME,
+            idp_contact_surname=federation_fixtures.IDP_CONTACT_SURNAME,
+            idp_contact_email=federation_fixtures.IDP_CONTACT_EMAIL,
+            idp_contact_telephone=(
+                federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER),
+            idp_contact_type=federation_fixtures.IDP_CONTACT_TYPE)
+
+    def test_check_entity_id(self):
+        metadata = self.generator.generate_metadata()
+        self.assertEqual(federation_fixtures.IDP_ENTITY_ID, metadata.entity_id)
+
+    def test_metadata_validity(self):
+        """Call md.EntityDescriptor method that does internal verification."""
+        self.generator.generate_metadata().verify()
+
+    def test_serialize_metadata_object(self):
+        """Check whether serialization doesn't raise any exceptions."""
+        self.generator.generate_metadata().to_string()
+        # TODO(marek-denis): Check values here
+
+    def test_check_idp_sso(self):
+        metadata = self.generator.generate_metadata()
+        idpsso_descriptor = metadata.idpsso_descriptor
+        self.assertIsNotNone(metadata.idpsso_descriptor)
+        self.assertEqual(federation_fixtures.IDP_SSO_ENDPOINT,
+                         idpsso_descriptor.single_sign_on_service.location)
+
+        self.assertIsNotNone(idpsso_descriptor.organization)
+        organization = idpsso_descriptor.organization
+        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_DISPLAY_NAME,
+                         organization.organization_display_name.text)
+        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_NAME,
+                         organization.organization_name.text)
+        self.assertEqual(federation_fixtures.IDP_ORGANIZATION_URL,
+                         organization.organization_url.text)
+
+        self.assertIsNotNone(idpsso_descriptor.contact_person)
+        contact_person = idpsso_descriptor.contact_person
+
+        self.assertEqual(federation_fixtures.IDP_CONTACT_GIVEN_NAME,
+                         contact_person.given_name.text)
+        self.assertEqual(federation_fixtures.IDP_CONTACT_SURNAME,
+                         contact_person.sur_name.text)
+        self.assertEqual(federation_fixtures.IDP_CONTACT_EMAIL,
+                         contact_person.email_address.text)
+        self.assertEqual(federation_fixtures.IDP_CONTACT_TELEPHONE_NUMBER,
+                         contact_person.telephone_number.text)
+        self.assertEqual(federation_fixtures.IDP_CONTACT_TYPE,
+                         contact_person.contact_type)
+
+    def test_metadata_no_organization(self):
+        self.config_fixture.config(
+            group='saml',
+            idp_organization_display_name=None,
+            idp_organization_url=None,
+            idp_organization_name=None)
+        metadata = self.generator.generate_metadata()
+        idpsso_descriptor = metadata.idpsso_descriptor
+        self.assertIsNotNone(metadata.idpsso_descriptor)
+        self.assertIsNone(idpsso_descriptor.organization)
+        self.assertIsNotNone(idpsso_descriptor.contact_person)
+
+    def test_metadata_no_contact_person(self):
+        self.config_fixture.config(
+            group='saml',
+            idp_contact_name=None,
+            idp_contact_surname=None,
+            idp_contact_email=None,
+            idp_contact_telephone=None)
+        metadata = self.generator.generate_metadata()
+        idpsso_descriptor = metadata.idpsso_descriptor
+        self.assertIsNotNone(metadata.idpsso_descriptor)
+        self.assertIsNotNone(idpsso_descriptor.organization)
+        self.assertEqual([], idpsso_descriptor.contact_person)
+
+    def test_metadata_invalid_contact_type(self):
+        self.config_fixture.config(
+            group='saml',
+            idp_contact_type="invalid")
+        self.assertRaises(exception.ValidationError,
+                          self.generator.generate_metadata)
+
+    def test_metadata_invalid_idp_sso_endpoint(self):
+        self.config_fixture.config(
+            group='saml',
+            idp_sso_endpoint=None)
+        self.assertRaises(exception.ValidationError,
+                          self.generator.generate_metadata)
+
+    def test_metadata_invalid_idp_entity_id(self):
+        self.config_fixture.config(
+            group='saml',
+            idp_entity_id=None)
+        self.assertRaises(exception.ValidationError,
+                          self.generator.generate_metadata)
+
+    def test_get_metadata_with_no_metadata_file_configured(self):
+        self.get(self.METADATA_URL, expected_status=500)
+
+    def test_get_metadata(self):
+        self.config_fixture.config(
+            group='saml', idp_metadata_path=XMLDIR + '/idp_saml2_metadata.xml')
+        r = self.get(self.METADATA_URL, response_content_type='text/xml',
+                     expected_status=200)
+        self.assertEqual('text/xml', r.headers.get('Content-Type'))
+
+        reference_file = _load_xml('idp_saml2_metadata.xml')
+        self.assertEqual(reference_file, r.result)
+
+
+class ServiceProviderTests(FederationTests):
+    """A test class for Service Providers."""
+
+    MEMBER_NAME = 'service_provider'
+    COLLECTION_NAME = 'service_providers'
+    SERVICE_PROVIDER_ID = 'ACME'
+    SP_KEYS = ['auth_url', 'id', 'enabled', 'description', 'sp_url']
+
+    def setUp(self):
+        super(ServiceProviderTests, self).setUp()
+        # Add a Service Provider
+        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
+        self.SP_REF = self.sp_ref()
+        self.SERVICE_PROVIDER = self.put(
+            url, body={'service_provider': self.SP_REF},
+            expected_status=201).result
+
+    def sp_ref(self):
+        ref = {
+            'auth_url': 'https://' + uuid.uuid4().hex + '.com',
+            'enabled': True,
+            'description': uuid.uuid4().hex,
+            'sp_url': 'https://' + uuid.uuid4().hex + '.com',
+        }
+        return ref
+
+    def base_url(self, suffix=None):
+        if suffix is not None:
+            return '/OS-FEDERATION/service_providers/' + str(suffix)
+        return '/OS-FEDERATION/service_providers'
+
+    def test_get_service_provider(self):
+        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
+        resp = self.get(url, expected_status=200)
+        self.assertValidEntity(resp.result['service_provider'],
+                               keys_to_check=self.SP_KEYS)
+
+    def test_get_service_provider_fail(self):
+        url = self.base_url(suffix=uuid.uuid4().hex)
+        self.get(url, expected_status=404)
+
+    def test_create_service_provider(self):
+        url = self.base_url(suffix=uuid.uuid4().hex)
+        sp = self.sp_ref()
+        resp = self.put(url, body={'service_provider': sp},
+                        expected_status=201)
+        self.assertValidEntity(resp.result['service_provider'],
+                               keys_to_check=self.SP_KEYS)
+
+    def test_create_service_provider_fail(self):
+        """Try adding SP object with unallowed attribute."""
+        url = self.base_url(suffix=uuid.uuid4().hex)
+        sp = self.sp_ref()
+        sp[uuid.uuid4().hex] = uuid.uuid4().hex
+        self.put(url, body={'service_provider': sp},
+                 expected_status=400)
+
+    def test_list_service_providers(self):
+        """Test listing of service provider objects.
+
+        Add two new service providers. List all available service providers.
+        Expect to get a list of three service providers (one created by
+        setUp()). Check that the attributes match.
+
+        """
+        ref_service_providers = {
+            uuid.uuid4().hex: self.sp_ref(),
+            uuid.uuid4().hex: self.sp_ref(),
+        }
+        for id, sp in ref_service_providers.items():
+            url = self.base_url(suffix=id)
+            self.put(url, body={'service_provider': sp}, expected_status=201)
+
+        # Insert ids into the service provider references; we will compare
+        # them with the server responses, which include an 'id' attribute.
+
+        ref_service_providers[self.SERVICE_PROVIDER_ID] = self.SP_REF
+        for id, sp in ref_service_providers.items():
+            sp['id'] = id
+
+        url = self.base_url()
+        resp = self.get(url)
+        service_providers = resp.result
+        for service_provider in service_providers['service_providers']:
+            id = service_provider['id']
+            self.assertValidEntity(
+                service_provider, ref=ref_service_providers[id],
+                keys_to_check=self.SP_KEYS)
+
+    def test_update_service_provider(self):
+        """Update existing service provider.
+
+        Update default existing service provider and make sure it has been
+        properly changed.
+
+        """
+        new_sp_ref = self.sp_ref()
+        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
+        resp = self.patch(url, body={'service_provider': new_sp_ref},
+                          expected_status=200)
+        patch_result = resp.result
+        new_sp_ref['id'] = self.SERVICE_PROVIDER_ID
+        self.assertValidEntity(patch_result['service_provider'],
+                               ref=new_sp_ref,
+                               keys_to_check=self.SP_KEYS)
+
+        resp = self.get(url, expected_status=200)
+        get_result = resp.result
+
+        self.assertDictEqual(patch_result['service_provider'],
+                             get_result['service_provider'])
+
+    def test_update_service_provider_immutable_parameters(self):
+        """Update immutable attributes in service provider.
+
+        In this particular case the test will try to change the ``id``
+        attribute. The server should return an HTTP 400 error code.
+
+        """
+        new_sp_ref = {'id': uuid.uuid4().hex}
+        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
+        self.patch(url, body={'service_provider': new_sp_ref},
+                   expected_status=400)
+
+    def test_update_service_provider_unknown_parameter(self):
+        new_sp_ref = self.sp_ref()
+        new_sp_ref[uuid.uuid4().hex] = uuid.uuid4().hex
+        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
+        self.patch(url, body={'service_provider': new_sp_ref},
+                   expected_status=400)
+
+    def test_update_service_provider_404(self):
+        new_sp_ref = self.sp_ref()
+        new_sp_ref['description'] = uuid.uuid4().hex
+        url = self.base_url(suffix=uuid.uuid4().hex)
+        self.patch(url, body={'service_provider': new_sp_ref},
+                   expected_status=404)
+
+    def test_delete_service_provider(self):
+        url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
+        self.delete(url, expected_status=204)
+
+    def test_delete_service_provider_404(self):
+        url = self.base_url(suffix=uuid.uuid4().hex)
+        self.delete(url, expected_status=404)
+
+
+class WebSSOTests(FederatedTokenTests):
+    """A class for testing Web SSO."""
+
+    SSO_URL = '/auth/OS-FEDERATION/websso/'
+    SSO_TEMPLATE_NAME = 'sso_callback_template.html'
+    SSO_TEMPLATE_PATH = os.path.join(core.dirs.etc(), SSO_TEMPLATE_NAME)
+    TRUSTED_DASHBOARD = 'http://horizon.com'
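+    # the dashboard origin is passed as a URL-encoded query parameter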
+    ORIGIN = urllib.parse.quote_plus(TRUSTED_DASHBOARD)
+
+    def setUp(self):
+        super(WebSSOTests, self).setUp()
+        self.api = federation_controllers.Auth()
+
+    def config_overrides(self):
+        super(WebSSOTests, self).config_overrides()
+        self.config_fixture.config(
+            group='federation',
+            trusted_dashboard=[self.TRUSTED_DASHBOARD],
+            sso_callback_template=self.SSO_TEMPLATE_PATH,
+            remote_id_attribute=self.REMOTE_ID_ATTR)
+
+    def test_render_callback_template(self):
+        token_id = uuid.uuid4().hex
+        resp = self.api.render_html_response(self.TRUSTED_DASHBOARD, token_id)
+        self.assertIn(token_id, resp.body)
+        self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
+
+    def test_federated_sso_auth(self):
+        environment = {self.REMOTE_ID_ATTR: self.REMOTE_ID}
+        context = {'environment': environment}
+        query_string = {'origin': self.ORIGIN}
+        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
+        resp = self.api.federated_sso_auth(context, self.PROTOCOL)
+        self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
+
+    def test_federated_sso_auth_bad_remote_id(self):
+        environment = {self.REMOTE_ID_ATTR: self.IDP}
+        context = {'environment': environment}
+        query_string = {'origin': self.ORIGIN}
+        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
+        self.assertRaises(exception.IdentityProviderNotFound,
+                          self.api.federated_sso_auth,
+                          context, self.PROTOCOL)
+
+    def test_federated_sso_missing_query(self):
+        environment = {self.REMOTE_ID_ATTR: self.REMOTE_ID}
+        context = {'environment': environment}
+        self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
+        self.assertRaises(exception.ValidationError,
+                          self.api.federated_sso_auth,
+                          context, self.PROTOCOL)
+
+    def test_federated_sso_missing_query_bad_remote_id(self):
+        environment = {self.REMOTE_ID_ATTR: self.IDP}
+        context = {'environment': environment}
+        self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
+        self.assertRaises(exception.ValidationError,
+                          self.api.federated_sso_auth,
+                          context, self.PROTOCOL)
+
+    def test_federated_sso_untrusted_dashboard(self):
+        environment = {self.REMOTE_ID_ATTR: self.REMOTE_ID}
+        context = {'environment': environment}
+        query_string = {'origin': uuid.uuid4().hex}
+        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
+        self.assertRaises(exception.Unauthorized,
+                          self.api.federated_sso_auth,
+                          context, self.PROTOCOL)
+
+    def test_federated_sso_untrusted_dashboard_bad_remote_id(self):
+        environment = {self.REMOTE_ID_ATTR: self.IDP}
+        context = {'environment': environment}
+        query_string = {'origin': uuid.uuid4().hex}
+        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
+        self.assertRaises(exception.Unauthorized,
+                          self.api.federated_sso_auth,
+                          context, self.PROTOCOL)
+
+    def test_federated_sso_missing_remote_id(self):
+        context = {'environment': {}}
+        query_string = {'origin': self.ORIGIN}
+        self._inject_assertion(context, 'EMPLOYEE_ASSERTION', query_string)
+        self.assertRaises(exception.Unauthorized,
+                          self.api.federated_sso_auth,
+                          context, self.PROTOCOL)
+
+
+class K2KServiceCatalogTests(FederationTests):
+    SP1 = 'SP1'
+    SP2 = 'SP2'
+    SP3 = 'SP3'
+
+    def setUp(self):
+        super(K2KServiceCatalogTests, self).setUp()
+
+        sp = self.sp_ref()
+        self.federation_api.create_sp(self.SP1, sp)
+        self.sp_alpha = {self.SP1: sp}
+
+        sp = self.sp_ref()
+        self.federation_api.create_sp(self.SP2, sp)
+        self.sp_beta = {self.SP2: sp}
+
+        sp = self.sp_ref()
+        self.federation_api.create_sp(self.SP3, sp)
+        self.sp_gamma = {self.SP3: sp}
+
+        self.token_v3_helper = token_common.V3TokenDataHelper()
+
+    def sp_response(self, id, ref):
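+        # Strip the fields that do not appear in the token's service
+        # catalog and add the id, mirroring the expected catalog entry.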
+        ref.pop('enabled')
+        ref.pop('description')
+        ref['id'] = id
+        return ref
+
+    def sp_ref(self):
+        ref = {
+            'auth_url': uuid.uuid4().hex,
+            'enabled': True,
+            'description': uuid.uuid4().hex,
+            'sp_url': uuid.uuid4().hex,
+        }
+        return ref
+
+    def _validate_service_providers(self, token, ref):
+        token_data = token['token']
+        self.assertIn('service_providers', token_data)
+        self.assertIsNotNone(token_data['service_providers'])
+        service_providers = token_data.get('service_providers')
+
+        self.assertEqual(len(ref), len(service_providers))
+        for entity in service_providers:
+            id = entity.get('id')
+            ref_entity = self.sp_response(id, ref.get(id))
+            self.assertDictEqual(ref_entity, entity)
+
+    def test_service_providers_in_token(self):
+        """Check if service providers are listed in service catalog."""
+
+        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
+        ref = {}
+        for r in (self.sp_alpha, self.sp_beta, self.sp_gamma):
+            ref.update(r)
+        self._validate_service_providers(token, ref)
+
+    def test_service_providers_in_token_disabled_sp(self):
+        """Test behaviour with disabled service providers.
+
+        Disabled service providers should not be listed in the service
+        catalog.
+
+        """
+        # disable service provider ALPHA
+        sp_ref = {'enabled': False}
+        self.federation_api.update_sp(self.SP1, sp_ref)
+
+        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
+        ref = {}
+        for r in (self.sp_beta, self.sp_gamma):
+            ref.update(r)
+        self._validate_service_providers(token, ref)
+
+    def test_no_service_providers_in_token(self):
+        """Test service catalog with disabled service providers.
+
+        There should be no ``service_providers`` entry in the catalog.
+        The test passes provided that attribute is absent.
+
+        """
+        sp_ref = {'enabled': False}
+        for sp in (self.SP1, self.SP2, self.SP3):
+            self.federation_api.update_sp(sp, sp_ref)
+
+        token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
+        self.assertNotIn('service_providers', token['token'],
+                         message=('Expected Service Catalog not to have '
+                                  'service_providers'))
diff --git a/keystone-moon/keystone/tests/unit/test_v3_filters.py b/keystone-moon/keystone/tests/unit/test_v3_filters.py
new file mode 100644 (file)
index 0000000..4ad4465
--- /dev/null
@@ -0,0 +1,452 @@
+# Copyright 2012 OpenStack LLC
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+
+from keystone.tests.unit import filtering
+from keystone.tests.unit.ksfixtures import temporaryfile
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+
+
+class IdentityTestFilteredCase(filtering.FilterTests,
+                               test_v3.RestfulTestCase):
+    """Test filter enforcement on the v3 Identity API."""
+
+    def setUp(self):
+        """Setup for Identity Filter Test Cases."""
+
+        super(IdentityTestFilteredCase, self).setUp()
+        self.tempfile = self.useFixture(temporaryfile.SecureTempFile())
+        self.tmpfilename = self.tempfile.file_name
+        self.config_fixture.config(group='oslo_policy',
+                                   policy_file=self.tmpfilename)
+
+    def load_sample_data(self):
+        """Create sample data for these tests.
+
+        As well as the usual housekeeping, create a set of domains,
+        users, roles and projects for the subsequent tests:
+
+        - Three domains: A,B & C.  C is disabled.
+        - DomainA has user1, DomainB has user2 and user3
+        - DomainA has group1 and group2, DomainB has group3
+        - User1 has a role on DomainA
+
+        Remember that there will also be a fourth domain in existence,
+        the default domain.
+
+        """
+        # Start by creating a few domains
+        self._populate_default_domain()
+        self.domainA = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainA['id'], self.domainA)
+        self.domainB = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainB['id'], self.domainB)
+        self.domainC = self.new_domain_ref()
+        self.domainC['enabled'] = False
+        self.resource_api.create_domain(self.domainC['id'], self.domainC)
+
+        # Now create some users: one in domainA and two in domainB
+        self.user1 = self.new_user_ref(domain_id=self.domainA['id'])
+        password = uuid.uuid4().hex
+        self.user1['password'] = password
+        self.user1 = self.identity_api.create_user(self.user1)
+        self.user1['password'] = password
+
+        self.user2 = self.new_user_ref(domain_id=self.domainB['id'])
+        self.user2['password'] = password
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+
+        self.user3 = self.new_user_ref(domain_id=self.domainB['id'])
+        self.user3['password'] = password
+        self.user3 = self.identity_api.create_user(self.user3)
+        self.user3['password'] = password
+
+        self.role = self.new_role_ref()
+        self.role_api.create_role(self.role['id'], self.role)
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user1['id'],
+                                         domain_id=self.domainA['id'])
+
+        # A default auth request we can use - un-scoped user token
+        self.auth = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'])
+
+    def _get_id_list_from_ref_list(self, ref_list):
+        result_list = []
+        for x in ref_list:
+            result_list.append(x['id'])
+        return result_list
+
+    def _set_policy(self, new_policy):
+        with open(self.tmpfilename, "w") as policyfile:
+            policyfile.write(jsonutils.dumps(new_policy))
+
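+    # For example, _set_policy({"identity:list_users": []}) writes an
+    # always-passing rule for the list_users API into the temporary policy
+    # file configured in setUp, leaving that call unprotected.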
+    def test_list_users_filtered_by_domain(self):
+        """GET /users?domain_id=mydomain (filtered)
+
+        Test Plan:
+
+        - Update policy so api is unprotected
+        - Use an un-scoped token to make sure we can filter the
+          users by domainB, getting back the 2 users in that domain
+
+        """
+        self._set_policy({"identity:list_users": []})
+        url_by_name = '/users?domain_id=%s' % self.domainB['id']
+        r = self.get(url_by_name, auth=self.auth)
+        # We should get back two users: those in DomainB
+        id_list = self._get_id_list_from_ref_list(r.result.get('users'))
+        self.assertIn(self.user2['id'], id_list)
+        self.assertIn(self.user3['id'], id_list)
+
+    def test_list_filtered_domains(self):
+        """GET /domains?enabled=0
+
+        Test Plan:
+
+        - Update policy for no protection on api
+        - Filter by the 'enabled' boolean to get disabled domains, which
+          should return just domainC
+        - Try the filter using different ways of specifying True/False
+          to test that our handling of booleans in filter matching is
+          correct
+
+        """
+        new_policy = {"identity:list_domains": []}
+        self._set_policy(new_policy)
+        r = self.get('/domains?enabled=0', auth=self.auth)
+        id_list = self._get_id_list_from_ref_list(r.result.get('domains'))
+        self.assertEqual(1, len(id_list))
+        self.assertIn(self.domainC['id'], id_list)
+
+        # Try a few ways of specifying 'false'
+        for val in ('0', 'false', 'False', 'FALSE', 'n', 'no', 'off'):
+            r = self.get('/domains?enabled=%s' % val, auth=self.auth)
+            id_list = self._get_id_list_from_ref_list(r.result.get('domains'))
+            self.assertEqual([self.domainC['id']], id_list)
+
+        # Now try a few ways of specifying 'true' when we should get back
+        # the other two domains, plus the default domain
+        for val in ('1', 'true', 'True', 'TRUE', 'y', 'yes', 'on'):
+            r = self.get('/domains?enabled=%s' % val, auth=self.auth)
+            id_list = self._get_id_list_from_ref_list(r.result.get('domains'))
+            self.assertEqual(3, len(id_list))
+            self.assertIn(self.domainA['id'], id_list)
+            self.assertIn(self.domainB['id'], id_list)
+            self.assertIn(CONF.identity.default_domain_id, id_list)
+
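+        # A value-less filter ('?enabled' with no value) behaves like
+        # filtering on enabled=True.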
+        r = self.get('/domains?enabled', auth=self.auth)
+        id_list = self._get_id_list_from_ref_list(r.result.get('domains'))
+        self.assertEqual(3, len(id_list))
+        self.assertIn(self.domainA['id'], id_list)
+        self.assertIn(self.domainB['id'], id_list)
+        self.assertIn(CONF.identity.default_domain_id, id_list)
+
+    def test_multiple_filters(self):
+        """GET /domains?enabled&name=myname
+
+        Test Plan:
+
+        - Update policy for no protection on api
+        - Filter by the 'enabled' boolean and name - this should
+          return a single domain
+
+        """
+        new_policy = {"identity:list_domains": []}
+        self._set_policy(new_policy)
+
+        my_url = '/domains?enabled&name=%s' % self.domainA['name']
+        r = self.get(my_url, auth=self.auth)
+        id_list = self._get_id_list_from_ref_list(r.result.get('domains'))
+        self.assertEqual(1, len(id_list))
+        self.assertIn(self.domainA['id'], id_list)
+        self.assertIs(True, r.result.get('domains')[0]['enabled'])
+
+    def test_invalid_filter_is_ignored(self):
+        """GET /domains?enableds&name=myname
+
+        Test Plan:
+
+        - Update policy for no protection on api
+        - Filter by name and 'enableds', which does not exist
+        - Assert 'enableds' is ignored
+
+        """
+        new_policy = {"identity:list_domains": []}
+        self._set_policy(new_policy)
+
+        my_url = '/domains?enableds=0&name=%s' % self.domainA['name']
+        r = self.get(my_url, auth=self.auth)
+        id_list = self._get_id_list_from_ref_list(r.result.get('domains'))
+
+        # domainA is returned and it is enabled, since enableds=0 is not the
+        # same as enabled=0
+        self.assertEqual(1, len(id_list))
+        self.assertIn(self.domainA['id'], id_list)
+        self.assertIs(True, r.result.get('domains')[0]['enabled'])
+
+    def test_list_users_filtered_by_funny_name(self):
+        """GET /users?name=%myname%
+
+        Test Plan:
+
+        - Update policy so api is unprotected
+        - Update a user with name that has filter escape characters
+        - Ensure we can filter on it
+
+        """
+        self._set_policy({"identity:list_users": []})
+        user = self.user1
+        user['name'] = '%my%name%'
+        self.identity_api.update_user(user['id'], user)
+
+        url_by_name = '/users?name=%my%name%'
+        r = self.get(url_by_name, auth=self.auth)
+
+        self.assertEqual(1, len(r.result.get('users')))
+        self.assertEqual(user['id'], r.result.get('users')[0]['id'])
+
+    def test_inexact_filters(self):
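+        # Inexact matching appends a double-underscore comparator to the
+        # attribute name: __contains, __icontains, __startswith,
+        # __istartswith, __endswith and __iendswith (the 'i' variants
+        # being case-insensitive), all of which are exercised below.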
+        # Create 20 users
+        user_list = self._create_test_data('user', 20)
+        # Set up some names that we can filter on
+        user = user_list[5]
+        user['name'] = 'The'
+        self.identity_api.update_user(user['id'], user)
+        user = user_list[6]
+        user['name'] = 'The Ministry'
+        self.identity_api.update_user(user['id'], user)
+        user = user_list[7]
+        user['name'] = 'The Ministry of'
+        self.identity_api.update_user(user['id'], user)
+        user = user_list[8]
+        user['name'] = 'The Ministry of Silly'
+        self.identity_api.update_user(user['id'], user)
+        user = user_list[9]
+        user['name'] = 'The Ministry of Silly Walks'
+        self.identity_api.update_user(user['id'], user)
+        # ...and one for useful case insensitivity testing
+        user = user_list[10]
+        user['name'] = 'the ministry of silly walks OF'
+        self.identity_api.update_user(user['id'], user)
+
+        self._set_policy({"identity:list_users": []})
+
+        url_by_name = '/users?name__contains=Ministry'
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertEqual(4, len(r.result.get('users')))
+        self._match_with_list(r.result.get('users'), user_list,
+                              list_start=6, list_end=10)
+
+        url_by_name = '/users?name__icontains=miNIstry'
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertEqual(5, len(r.result.get('users')))
+        self._match_with_list(r.result.get('users'), user_list,
+                              list_start=6, list_end=11)
+
+        url_by_name = '/users?name__startswith=The'
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertEqual(5, len(r.result.get('users')))
+        self._match_with_list(r.result.get('users'), user_list,
+                              list_start=5, list_end=10)
+
+        url_by_name = '/users?name__istartswith=the'
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertEqual(6, len(r.result.get('users')))
+        self._match_with_list(r.result.get('users'), user_list,
+                              list_start=5, list_end=11)
+
+        url_by_name = '/users?name__endswith=of'
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertEqual(1, len(r.result.get('users')))
+        self.assertEqual(r.result.get('users')[0]['id'], user_list[7]['id'])
+
+        url_by_name = '/users?name__iendswith=OF'
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertEqual(2, len(r.result.get('users')))
+        self.assertEqual(user_list[7]['id'], r.result.get('users')[0]['id'])
+        self.assertEqual(user_list[10]['id'], r.result.get('users')[1]['id'])
+
+        self._delete_test_data('user', user_list)
+
+    def test_filter_sql_injection_attack(self):
+        """GET /users?name=<injected sql_statement>
+
+        Test Plan:
+
+        - Attempt to get all entities back by passing a two-term attribute
+        - Attempt to piggyback filter to damage DB (e.g. drop table)
+
+        """
+        self._set_policy({"identity:list_users": [],
+                          "identity:list_groups": [],
+                          "identity:create_group": []})
+
+        url_by_name = "/users?name=anything' or 'x'='x"
+        r = self.get(url_by_name, auth=self.auth)
+
+        self.assertEqual(0, len(r.result.get('users')))
+
+        # See if we can add a SQL command... use the group table instead of
+        # the user table, since 'user' is a reserved word for SQLAlchemy.
+        group = self.new_group_ref(domain_id=self.domainB['id'])
+        group = self.identity_api.create_group(group)
+
+        url_by_name = "/users?name=x'; drop table group"
+        r = self.get(url_by_name, auth=self.auth)
+
+        # Check group table is still there...
+        url_by_name = "/groups"
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertGreater(len(r.result.get('groups')), 0)
+
+
+class IdentityTestListLimitCase(IdentityTestFilteredCase):
+    """Test list limiting enforcement on the v3 Identity API."""
+    content_type = 'json'
+
+    def setUp(self):
+        """Setup for Identity Limit Test Cases."""
+
+        super(IdentityTestListLimitCase, self).setUp()
+
+        self._set_policy({"identity:list_users": [],
+                          "identity:list_groups": [],
+                          "identity:list_projects": [],
+                          "identity:list_services": [],
+                          "identity:list_policies": []})
+
+        # Create 10 entries for each of the entities we are going to test
+        self.ENTITY_TYPES = ['user', 'group', 'project']
+        self.entity_lists = {}
+        for entity in self.ENTITY_TYPES:
+            self.entity_lists[entity] = self._create_test_data(entity, 10)
+            # Make sure we clean up when finished
+            self.addCleanup(self.clean_up_entity, entity)
+
+        self.service_list = []
+        self.addCleanup(self.clean_up_service)
+        for _ in range(10):
+            new_entity = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex}
+            service = self.catalog_api.create_service(new_entity['id'],
+                                                      new_entity)
+            self.service_list.append(service)
+
+        self.policy_list = []
+        self.addCleanup(self.clean_up_policy)
+        for _ in range(10):
+            new_entity = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex,
+                          'blob': uuid.uuid4().hex}
+            policy = self.policy_api.create_policy(new_entity['id'],
+                                                   new_entity)
+            self.policy_list.append(policy)
+
+    def clean_up_entity(self, entity):
+        """Clean up entity test data from Identity Limit Test Cases."""
+
+        self._delete_test_data(entity, self.entity_lists[entity])
+
+    def clean_up_service(self):
+        """Clean up service test data from Identity Limit Test Cases."""
+
+        for service in self.service_list:
+            self.catalog_api.delete_service(service['id'])
+
+    def clean_up_policy(self):
+        """Clean up policy test data from Identity Limit Test Cases."""
+
+        for policy in self.policy_list:
+            self.policy_api.delete_policy(policy['id'])
+
+    def _test_entity_list_limit(self, entity, driver):
+        """GET /<entities> (limited)
+
+        Test Plan:
+
+        - For the specified type of entity:
+            - Update policy for no protection on api
+            - Add a bunch of entities
+        - Set the global list limit to 5, and check that getting all
+          entities only returns 5
+        - Set the driver list_limit to 4, and check that now only 4 are
+          returned
+
+        """
+        if entity == 'policy':
+            plural = 'policies'
+        else:
+            plural = '%ss' % entity
+
+        self.config_fixture.config(list_limit=5)
+        self.config_fixture.config(group=driver, list_limit=None)
+        r = self.get('/%s' % plural, auth=self.auth)
+        self.assertEqual(5, len(r.result.get(plural)))
+        self.assertIs(r.result.get('truncated'), True)
+
+        self.config_fixture.config(group=driver, list_limit=4)
+        r = self.get('/%s' % plural, auth=self.auth)
+        self.assertEqual(4, len(r.result.get(plural)))
+        self.assertIs(r.result.get('truncated'), True)
+
+    def test_users_list_limit(self):
+        self._test_entity_list_limit('user', 'identity')
+
+    def test_groups_list_limit(self):
+        self._test_entity_list_limit('group', 'identity')
+
+    def test_projects_list_limit(self):
+        self._test_entity_list_limit('project', 'resource')
+
+    def test_services_list_limit(self):
+        self._test_entity_list_limit('service', 'catalog')
+
+    def test_non_driver_list_limit(self):
+        """Check list can be limited without driver level support.
+
+        Policy limiting is not done at the driver level (since it
+        really isn't worth doing it there). So use this as a test
+        to ensure that the controller level successfully limits
+        in this case.
+
+        """
+        self._test_entity_list_limit('policy', 'policy')
+
+    def test_no_limit(self):
+        """Check truncated attribute not set when list not limited."""
+
+        r = self.get('/services', auth=self.auth)
+        self.assertEqual(10, len(r.result.get('services')))
+        self.assertIsNone(r.result.get('truncated'))
+
+    def test_at_limit(self):
+        """Check truncated attribute not set when list at max size."""
+
+        # Test this by overriding the general limit with a higher
+        # driver-specific limit (allowing all entities to be returned
+        # in the collection), which should result in a non truncated list
+        self.config_fixture.config(list_limit=5)
+        self.config_fixture.config(group='catalog', list_limit=10)
+        r = self.get('/services', auth=self.auth)
+        self.assertEqual(10, len(r.result.get('services')))
+        self.assertIsNone(r.result.get('truncated'))
diff --git a/keystone-moon/keystone/tests/unit/test_v3_identity.py b/keystone-moon/keystone/tests/unit/test_v3_identity.py
new file mode 100644 (file)
index 0000000..ac07729
--- /dev/null
@@ -0,0 +1,584 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+from testtools import matchers
+
+from keystone.common import controller
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+
+
+class IdentityTestCase(test_v3.RestfulTestCase):
+    """Test users and groups."""
+
+    def setUp(self):
+        super(IdentityTestCase, self).setUp()
+
+        self.group = self.new_group_ref(
+            domain_id=self.domain_id)
+        self.group = self.identity_api.create_group(self.group)
+        self.group_id = self.group['id']
+
+        self.credential_id = uuid.uuid4().hex
+        self.credential = self.new_credential_ref(
+            user_id=self.user['id'],
+            project_id=self.project_id)
+        self.credential['id'] = self.credential_id
+        self.credential_api.create_credential(
+            self.credential_id,
+            self.credential)
+
+    # user crud tests
+
+    def test_create_user(self):
+        """Call ``POST /users``."""
+        ref = self.new_user_ref(domain_id=self.domain_id)
+        r = self.post(
+            '/users',
+            body={'user': ref})
+        return self.assertValidUserResponse(r, ref)
+
+    def test_create_user_without_domain(self):
+        """Call ``POST /users`` without specifying domain.
+
+        According to the identity-api specification, if you do not
+        explicitly specify the domain_id in the entity, it should
+        take the domain scope of the token as the domain_id.
+
+        """
+        # Create a user with a role on the domain so we can get a
+        # domain scoped token
+        domain = self.new_domain_ref()
+        self.resource_api.create_domain(domain['id'], domain)
+        user = self.new_user_ref(domain_id=domain['id'])
+        password = user['password']
+        user = self.identity_api.create_user(user)
+        user['password'] = password
+        self.assignment_api.create_grant(
+            role_id=self.role_id, user_id=user['id'],
+            domain_id=domain['id'])
+
+        ref = self.new_user_ref(domain_id=domain['id'])
+        ref_nd = ref.copy()
+        ref_nd.pop('domain_id')
+        auth = self.build_authentication_request(
+            user_id=user['id'],
+            password=user['password'],
+            domain_id=domain['id'])
+        r = self.post('/users', body={'user': ref_nd}, auth=auth)
+        self.assertValidUserResponse(r, ref)
+
+        # Now try the same thing without a domain token - which should fail
+        ref = self.new_user_ref(domain_id=domain['id'])
+        ref_nd = ref.copy()
+        ref_nd.pop('domain_id')
+        auth = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        r = self.post('/users', body={'user': ref_nd}, auth=auth)
+        # TODO(henry-nash): Due to bug #1283539 we currently automatically
+        # use the default domain_id if a domain scoped token is not being
+        # used. Change the code below to expect a failure once this bug is
+        # fixed.
+        ref['domain_id'] = CONF.identity.default_domain_id
+        return self.assertValidUserResponse(r, ref)
+
+    def test_create_user_400(self):
+        """Call ``POST /users``."""
+        self.post('/users', body={'user': {}}, expected_status=400)
+
+    def test_list_users(self):
+        """Call ``GET /users``."""
+        resource_url = '/users'
+        r = self.get(resource_url)
+        self.assertValidUserListResponse(r, ref=self.user,
+                                         resource_url=resource_url)
+
+    def test_list_users_with_multiple_backends(self):
+        """Call ``GET /users`` when multiple backends is enabled.
+
+        In this scenario, the controller requires a domain to be specified
+        either as a filter or by using a domain scoped token.
+
+        """
+        self.config_fixture.config(group='identity',
+                                   domain_specific_drivers_enabled=True)
+
+        # Create a user with a role on the domain so we can get a
+        # domain scoped token
+        domain = self.new_domain_ref()
+        self.resource_api.create_domain(domain['id'], domain)
+        user = self.new_user_ref(domain_id=domain['id'])
+        password = user['password']
+        user = self.identity_api.create_user(user)
+        user['password'] = password
+        self.assignment_api.create_grant(
+            role_id=self.role_id, user_id=user['id'],
+            domain_id=domain['id'])
+
+        ref = self.new_user_ref(domain_id=domain['id'])
+        ref_nd = ref.copy()
+        ref_nd.pop('domain_id')
+        auth = self.build_authentication_request(
+            user_id=user['id'],
+            password=user['password'],
+            domain_id=domain['id'])
+
+        # First try using a domain scoped token
+        resource_url = '/users'
+        r = self.get(resource_url, auth=auth)
+        self.assertValidUserListResponse(r, ref=user,
+                                         resource_url=resource_url)
+
+        # Now try with an explicit filter
+        resource_url = ('/users?domain_id=%(domain_id)s' %
+                        {'domain_id': domain['id']})
+        r = self.get(resource_url)
+        self.assertValidUserListResponse(r, ref=user,
+                                         resource_url=resource_url)
+
+        # Now try the same thing without a domain token or filter,
+        # which should fail
+        r = self.get('/users', expected_status=exception.Unauthorized.code)
+
+    def test_list_users_with_static_admin_token_and_multiple_backends(self):
+        # domain-specific operations with the bootstrap ADMIN token is
+        # disallowed when domain-specific drivers are enabled
+        self.config_fixture.config(group='identity',
+                                   domain_specific_drivers_enabled=True)
+        self.get('/users', token=CONF.admin_token,
+                 expected_status=exception.Unauthorized.code)
+
+    def test_list_users_no_default_project(self):
+        """Call ``GET /users`` making sure no default_project_id."""
+        user = self.new_user_ref(self.domain_id)
+        user = self.identity_api.create_user(user)
+        resource_url = '/users'
+        r = self.get(resource_url)
+        self.assertValidUserListResponse(r, ref=user,
+                                         resource_url=resource_url)
+
+    def test_get_user(self):
+        """Call ``GET /users/{user_id}``."""
+        r = self.get('/users/%(user_id)s' % {
+            'user_id': self.user['id']})
+        self.assertValidUserResponse(r, self.user)
+
+    def test_get_user_with_default_project(self):
+        """Call ``GET /users/{user_id}`` making sure of default_project_id."""
+        user = self.new_user_ref(domain_id=self.domain_id,
+                                 project_id=self.project_id)
+        user = self.identity_api.create_user(user)
+        r = self.get('/users/%(user_id)s' % {'user_id': user['id']})
+        self.assertValidUserResponse(r, user)
+
+    def test_add_user_to_group(self):
+        """Call ``PUT /groups/{group_id}/users/{user_id}``."""
+        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group_id, 'user_id': self.user['id']})
+
+    def test_list_groups_for_user(self):
+        """Call ``GET /users/{user_id}/groups``."""
+
+        self.user1 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        password = self.user1['password']
+        self.user1 = self.identity_api.create_user(self.user1)
+        self.user1['password'] = password
+        self.user2 = self.new_user_ref(
+            domain_id=self.domain['id'])
+        password = self.user2['password']
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group_id, 'user_id': self.user1['id']})
+
+        # Scenarios below are written to test the default policy configuration
+
+        # One should be allowed to list one's own groups
+        auth = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'])
+        resource_url = ('/users/%(user_id)s/groups' %
+                        {'user_id': self.user1['id']})
+        r = self.get(resource_url, auth=auth)
+        self.assertValidGroupListResponse(r, ref=self.group,
+                                          resource_url=resource_url)
+
+        # Administrator is allowed to list others' groups
+        resource_url = ('/users/%(user_id)s/groups' %
+                        {'user_id': self.user1['id']})
+        r = self.get(resource_url)
+        self.assertValidGroupListResponse(r, ref=self.group,
+                                          resource_url=resource_url)
+
+        # Ordinary users should not be allowed to list others' groups
+        auth = self.build_authentication_request(
+            user_id=self.user2['id'],
+            password=self.user2['password'])
+        r = self.get('/users/%(user_id)s/groups' % {
+            'user_id': self.user1['id']}, auth=auth,
+            expected_status=exception.ForbiddenAction.code)
+
+    def test_check_user_in_group(self):
+        """Call ``HEAD /groups/{group_id}/users/{user_id}``."""
+        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group_id, 'user_id': self.user['id']})
+        self.head('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group_id, 'user_id': self.user['id']})
+
+    def test_list_users_in_group(self):
+        """Call ``GET /groups/{group_id}/users``."""
+        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group_id, 'user_id': self.user['id']})
+        resource_url = ('/groups/%(group_id)s/users' %
+                        {'group_id': self.group_id})
+        r = self.get(resource_url)
+        self.assertValidUserListResponse(r, ref=self.user,
+                                         resource_url=resource_url)
+        self.assertIn('/groups/%(group_id)s/users' % {
+            'group_id': self.group_id}, r.result['links']['self'])
+
+    def test_remove_user_from_group(self):
+        """Call ``DELETE /groups/{group_id}/users/{user_id}``."""
+        self.put('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group_id, 'user_id': self.user['id']})
+        self.delete('/groups/%(group_id)s/users/%(user_id)s' % {
+            'group_id': self.group_id, 'user_id': self.user['id']})
+
+    def test_update_user(self):
+        """Call ``PATCH /users/{user_id}``."""
+        user = self.new_user_ref(domain_id=self.domain_id)
+        del user['id']
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': self.user['id']},
+            body={'user': user})
+        self.assertValidUserResponse(r, user)
+
+    def test_admin_password_reset(self):
+        # bootstrap a user as admin
+        user_ref = self.new_user_ref(domain_id=self.domain['id'])
+        password = user_ref['password']
+        user_ref = self.identity_api.create_user(user_ref)
+
+        # auth as user should work before a password change
+        old_password_auth = self.build_authentication_request(
+            user_id=user_ref['id'],
+            password=password)
+        r = self.v3_authenticate_token(old_password_auth, expected_status=201)
+        old_token = r.headers.get('X-Subject-Token')
+
+        # auth as user with a token should work before a password change
+        old_token_auth = self.build_authentication_request(token=old_token)
+        self.v3_authenticate_token(old_token_auth, expected_status=201)
+
+        # administrative password reset
+        new_password = uuid.uuid4().hex
+        self.patch('/users/%s' % user_ref['id'],
+                   body={'user': {'password': new_password}},
+                   expected_status=200)
+
+        # auth as user with original password should not work after change
+        self.v3_authenticate_token(old_password_auth, expected_status=401)
+
+        # auth as user with an old token should not work after change
+        # (a revoked token validates as 404 Not Found rather than 401)
+        self.v3_authenticate_token(old_token_auth, expected_status=404)
+
+        # new password should work
+        new_password_auth = self.build_authentication_request(
+            user_id=user_ref['id'],
+            password=new_password)
+        self.v3_authenticate_token(new_password_auth, expected_status=201)
+
+    def test_update_user_domain_id(self):
+        """Call ``PATCH /users/{user_id}`` with domain_id."""
+        user = self.new_user_ref(domain_id=self.domain['id'])
+        user = self.identity_api.create_user(user)
+        user['domain_id'] = CONF.identity.default_domain_id
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': user['id']},
+            body={'user': user},
+            expected_status=exception.ValidationError.code)
+        self.config_fixture.config(domain_id_immutable=False)
+        user['domain_id'] = self.domain['id']
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': user['id']},
+            body={'user': user})
+        self.assertValidUserResponse(r, user)
+
+    def test_delete_user(self):
+        """Call ``DELETE /users/{user_id}``.
+
+        As well as making sure the delete succeeds, we ensure
+        that any credentials that reference this user are
+        also deleted, while other credentials are unaffected.
+        In addition, no tokens should remain valid for this user.
+
+        """
+        # First check the credential for this user is present
+        r = self.credential_api.get_credential(self.credential['id'])
+        self.assertDictEqual(r, self.credential)
+        # Create a second credential with a different user
+        self.user2 = self.new_user_ref(
+            domain_id=self.domain['id'],
+            project_id=self.project['id'])
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.credential2 = self.new_credential_ref(
+            user_id=self.user2['id'],
+            project_id=self.project['id'])
+        self.credential_api.create_credential(
+            self.credential2['id'],
+            self.credential2)
+        # Create a token for this user which we can check later
+        # gets deleted
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            project_id=self.project['id'])
+        token = self.get_requested_token(auth_data)
+        # Confirm token is valid for now
+        self.head('/auth/tokens',
+                  headers={'X-Subject-Token': token},
+                  expected_status=200)
+
+        # Now delete the user
+        self.delete('/users/%(user_id)s' % {
+            'user_id': self.user['id']})
+
+        # Deleting the user should have deleted any credentials
+        # that reference this user
+        self.assertRaises(exception.CredentialNotFound,
+                          self.credential_api.get_credential,
+                          self.credential['id'])
+        # And no tokens should remain valid for this user
+        tokens = self.token_provider_api._persistence._list_tokens(
+            self.user['id'])
+        self.assertEqual(0, len(tokens))
+        # But the credential for user2 is unaffected
+        r = self.credential_api.get_credential(self.credential2['id'])
+        self.assertDictEqual(r, self.credential2)
+
+    # group crud tests
+
+    def test_create_group(self):
+        """Call ``POST /groups``."""
+        ref = self.new_group_ref(domain_id=self.domain_id)
+        r = self.post(
+            '/groups',
+            body={'group': ref})
+        return self.assertValidGroupResponse(r, ref)
+
+    def test_create_group_400(self):
+        """Call ``POST /groups``."""
+        self.post('/groups', body={'group': {}}, expected_status=400)
+
+    def test_list_groups(self):
+        """Call ``GET /groups``."""
+        resource_url = '/groups'
+        r = self.get(resource_url)
+        self.assertValidGroupListResponse(r, ref=self.group,
+                                          resource_url=resource_url)
+
+    def test_get_group(self):
+        """Call ``GET /groups/{group_id}``."""
+        r = self.get('/groups/%(group_id)s' % {
+            'group_id': self.group_id})
+        self.assertValidGroupResponse(r, self.group)
+
+    def test_update_group(self):
+        """Call ``PATCH /groups/{group_id}``."""
+        group = self.new_group_ref(domain_id=self.domain_id)
+        del group['id']
+        r = self.patch('/groups/%(group_id)s' % {
+            'group_id': self.group_id},
+            body={'group': group})
+        self.assertValidGroupResponse(r, group)
+
+    def test_update_group_domain_id(self):
+        """Call ``PATCH /groups/{group_id}`` with domain_id."""
+        group = self.new_group_ref(domain_id=self.domain['id'])
+        group = self.identity_api.create_group(group)
+        group['domain_id'] = CONF.identity.default_domain_id
+        r = self.patch('/groups/%(group_id)s' % {
+            'group_id': group['id']},
+            body={'group': group},
+            expected_status=exception.ValidationError.code)
+        self.config_fixture.config(domain_id_immutable=False)
+        group['domain_id'] = self.domain['id']
+        r = self.patch('/groups/%(group_id)s' % {
+            'group_id': group['id']},
+            body={'group': group})
+        self.assertValidGroupResponse(r, group)
+
+    def test_delete_group(self):
+        """Call ``DELETE /groups/{group_id}``."""
+        self.delete('/groups/%(group_id)s' % {
+            'group_id': self.group_id})
+
+
+class IdentityV3toV2MethodsTestCase(tests.TestCase):
+    """Test users V3 to V2 conversion methods."""
+
+    def setUp(self):
+        super(IdentityV3toV2MethodsTestCase, self).setUp()
+        self.load_backends()
+        self.user_id = uuid.uuid4().hex
+        self.default_project_id = uuid.uuid4().hex
+        self.tenant_id = uuid.uuid4().hex
+        self.domain_id = uuid.uuid4().hex
+        # User with only default_project_id in ref
+        self.user1 = {'id': self.user_id,
+                      'name': self.user_id,
+                      'default_project_id': self.default_project_id,
+                      'domain_id': self.domain_id}
+        # User without default_project_id or tenantId in ref
+        self.user2 = {'id': self.user_id,
+                      'name': self.user_id,
+                      'domain_id': self.domain_id}
+        # User with both tenantId and default_project_id in ref
+        self.user3 = {'id': self.user_id,
+                      'name': self.user_id,
+                      'default_project_id': self.default_project_id,
+                      'tenantId': self.tenant_id,
+                      'domain_id': self.domain_id}
+        # User with only tenantId in ref
+        self.user4 = {'id': self.user_id,
+                      'name': self.user_id,
+                      'tenantId': self.tenant_id,
+                      'domain_id': self.domain_id}
+
+        # Expected result if the user is meant to have a tenantId element
+        self.expected_user = {'id': self.user_id,
+                              'name': self.user_id,
+                              'username': self.user_id,
+                              'tenantId': self.default_project_id}
+
+        # Expected result if the user is not meant to have a tenantId element
+        self.expected_user_no_tenant_id = {'id': self.user_id,
+                                           'name': self.user_id,
+                                           'username': self.user_id}
+
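+    # As exercised below, v3_to_v2_user copies 'name' into 'username',
+    # maps default_project_id onto the v2 'tenantId' and drops the
+    # v3-only 'domain_id'; a stray v3 'tenantId' attribute on its own
+    # does not yield a v2 tenantId.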
+    def test_v3_to_v2_user_method(self):
+
+        updated_user1 = controller.V2Controller.v3_to_v2_user(self.user1)
+        self.assertIs(self.user1, updated_user1)
+        self.assertDictEqual(self.user1, self.expected_user)
+        updated_user2 = controller.V2Controller.v3_to_v2_user(self.user2)
+        self.assertIs(self.user2, updated_user2)
+        self.assertDictEqual(self.user2, self.expected_user_no_tenant_id)
+        updated_user3 = controller.V2Controller.v3_to_v2_user(self.user3)
+        self.assertIs(self.user3, updated_user3)
+        self.assertDictEqual(self.user3, self.expected_user)
+        updated_user4 = controller.V2Controller.v3_to_v2_user(self.user4)
+        self.assertIs(self.user4, updated_user4)
+        self.assertDictEqual(self.user4, self.expected_user_no_tenant_id)
+
+    def test_v3_to_v2_user_method_list(self):
+        user_list = [self.user1, self.user2, self.user3, self.user4]
+        updated_list = controller.V2Controller.v3_to_v2_user(user_list)
+
+        self.assertEqual(len(updated_list), len(user_list))
+
+        for i, ref in enumerate(updated_list):
+            # Order should not change.
+            self.assertIs(ref, user_list[i])
+
+        self.assertDictEqual(self.user1, self.expected_user)
+        self.assertDictEqual(self.user2, self.expected_user_no_tenant_id)
+        self.assertDictEqual(self.user3, self.expected_user)
+        self.assertDictEqual(self.user4, self.expected_user_no_tenant_id)
+
+
+class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
+
+    def setUp(self):
+        super(UserSelfServiceChangingPasswordsTestCase, self).setUp()
+        self.user_ref = self.new_user_ref(domain_id=self.domain['id'])
+        password = self.user_ref['password']
+        self.user_ref = self.identity_api.create_user(self.user_ref)
+        self.user_ref['password'] = password
+        self.token = self.get_request_token(self.user_ref['password'], 201)
+
+    def get_request_token(self, password, expected_status):
+        auth_data = self.build_authentication_request(
+            user_id=self.user_ref['id'],
+            password=password)
+        r = self.v3_authenticate_token(auth_data,
+                                       expected_status=expected_status)
+        return r.headers.get('X-Subject-Token')
+
+    def change_password(self, expected_status, **kwargs):
+        """Returns a test response for a change password request."""
+        return self.post('/users/%s/password' % self.user_ref['id'],
+                         body={'user': kwargs},
+                         token=self.token,
+                         expected_status=expected_status)
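+        # i.e. POST /users/{user_id}/password with a JSON body of the form
+        # {"user": {"original_password": "<old>", "password": "<new>"}}.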
+
+    def test_changing_password(self):
+        # original password works
+        token_id = self.get_request_token(self.user_ref['password'],
+                                          expected_status=201)
+        # original token works
+        old_token_auth = self.build_authentication_request(token=token_id)
+        self.v3_authenticate_token(old_token_auth, expected_status=201)
+
+        # change password
+        new_password = uuid.uuid4().hex
+        self.change_password(password=new_password,
+                             original_password=self.user_ref['password'],
+                             expected_status=204)
+
+        # old password fails
+        self.get_request_token(self.user_ref['password'], expected_status=401)
+
+        # old token fails
+        self.v3_authenticate_token(old_token_auth, expected_status=404)
+
+        # new password works
+        self.get_request_token(new_password, expected_status=201)
+
+    def test_changing_password_with_missing_original_password_fails(self):
+        r = self.change_password(password=uuid.uuid4().hex,
+                                 expected_status=400)
+        self.assertThat(r.result['error']['message'],
+                        matchers.Contains('original_password'))
+
+    def test_changing_password_with_missing_password_fails(self):
+        r = self.change_password(original_password=self.user_ref['password'],
+                                 expected_status=400)
+        self.assertThat(r.result['error']['message'],
+                        matchers.Contains('password'))
+
+    def test_changing_password_with_incorrect_password_fails(self):
+        self.change_password(password=uuid.uuid4().hex,
+                             original_password=uuid.uuid4().hex,
+                             expected_status=401)
+
+    def test_changing_password_with_disabled_user_fails(self):
+        # disable the user account
+        self.user_ref['enabled'] = False
+        self.patch('/users/%s' % self.user_ref['id'],
+                   body={'user': self.user_ref})
+
+        self.change_password(password=uuid.uuid4().hex,
+                             original_password=self.user_ref['password'],
+                             expected_status=401)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_oauth1.py b/keystone-moon/keystone/tests/unit/test_v3_oauth1.py
new file mode 100644 (file)
index 0000000..608162d
--- /dev/null
@@ -0,0 +1,891 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+from pycadf import cadftaxonomy
+from six.moves import urllib
+
+from keystone.contrib import oauth1
+from keystone.contrib.oauth1 import controllers
+from keystone.contrib.oauth1 import core
+from keystone import exception
+from keystone.tests.unit.common import test_notifications
+from keystone.tests.unit.ksfixtures import temporaryfile
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+
+
+class OAuth1Tests(test_v3.RestfulTestCase):
+
+    EXTENSION_NAME = 'oauth1'
+    EXTENSION_TO_ADD = 'oauth1_extension'
+
+    CONSUMER_URL = '/OS-OAUTH1/consumers'
+
+    def setUp(self):
+        super(OAuth1Tests, self).setUp()
+
+        # Now that the app has been served, we can query CONF values
+        self.base_url = 'http://localhost/v3'
+        self.controller = controllers.OAuthControllerV3()
+
+    def _create_single_consumer(self):
+        ref = {'description': uuid.uuid4().hex}
+        resp = self.post(
+            self.CONSUMER_URL,
+            body={'consumer': ref})
+        return resp.result['consumer']
+
+    def _create_request_token(self, consumer, project_id):
+        endpoint = '/OS-OAUTH1/request_token'
+        client = oauth1.Client(consumer['key'],
+                               client_secret=consumer['secret'],
+                               signature_method=oauth1.SIG_HMAC,
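+                               # 'oob' (out-of-band) means no HTTP callback
+                               # is registered for this request token: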
+                               callback_uri="oob")
+        headers = {'requested_project_id': project_id}
+        url, headers, body = client.sign(self.base_url + endpoint,
+                                         http_method='POST',
+                                         headers=headers)
+        return endpoint, headers
+
+    def _create_access_token(self, consumer, token):
+        endpoint = '/OS-OAUTH1/access_token'
+        client = oauth1.Client(consumer['key'],
+                               client_secret=consumer['secret'],
+                               resource_owner_key=token.key,
+                               resource_owner_secret=token.secret,
+                               signature_method=oauth1.SIG_HMAC,
+                               verifier=token.verifier)
+        url, headers, body = client.sign(self.base_url + endpoint,
+                                         http_method='POST')
+        headers.update({'Content-Type': 'application/json'})
+        return endpoint, headers
+
+    def _get_oauth_token(self, consumer, token):
+        client = oauth1.Client(consumer['key'],
+                               client_secret=consumer['secret'],
+                               resource_owner_key=token.key,
+                               resource_owner_secret=token.secret,
+                               signature_method=oauth1.SIG_HMAC)
+        endpoint = '/auth/tokens'
+        url, headers, body = client.sign(self.base_url + endpoint,
+                                         http_method='POST')
+        headers.update({'Content-Type': 'application/json'})
+        ref = {'auth': {'identity': {'oauth1': {}, 'methods': ['oauth1']}}}
+        return endpoint, headers, ref
+
+    def _authorize_request_token(self, request_id):
+        return '/OS-OAUTH1/authorize/%s' % (request_id)
+
+
+class ConsumerCRUDTests(OAuth1Tests):
+
+    def _consumer_create(self, description=None, description_flag=True,
+                         **kwargs):
+        if description_flag:
+            ref = {'description': description}
+        else:
+            ref = {}
+        if kwargs:
+            ref.update(kwargs)
+        resp = self.post(
+            self.CONSUMER_URL,
+            body={'consumer': ref})
+        consumer = resp.result['consumer']
+        consumer_id = consumer['id']
+        self.assertEqual(description, consumer['description'])
+        self.assertIsNotNone(consumer_id)
+        self.assertIsNotNone(consumer['secret'])
+        return consumer
+
+    def test_consumer_create(self):
+        description = uuid.uuid4().hex
+        self._consumer_create(description=description)
+
+    def test_consumer_create_none_desc_1(self):
+        self._consumer_create()
+
+    def test_consumer_create_none_desc_2(self):
+        self._consumer_create(description_flag=False)
+
+    def test_consumer_create_normalize_field(self):
+        # If a consumer is created with a field that has ':' or '-' in its
+        # name, the name is normalized by converting those chars to '_'.
+        field_name = 'some:weird-field'
+        field_value = uuid.uuid4().hex
+        extra_fields = {field_name: field_value}
+        consumer = self._consumer_create(**extra_fields)
+        normalized_field_name = 'some_weird_field'
+        self.assertEqual(field_value, consumer[normalized_field_name])
+
+    def test_consumer_delete(self):
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        resp = self.delete(self.CONSUMER_URL + '/%s' % consumer_id)
+        self.assertResponseStatus(resp, 204)
+
+    def test_consumer_get(self):
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        resp = self.get(self.CONSUMER_URL + '/%s' % consumer_id)
+        self_url = ['http://localhost/v3', self.CONSUMER_URL,
+                    '/', consumer_id]
+        self_url = ''.join(self_url)
+        self.assertEqual(self_url, resp.result['consumer']['links']['self'])
+        self.assertEqual(consumer_id, resp.result['consumer']['id'])
+
+    def test_consumer_list(self):
+        self._consumer_create()
+        resp = self.get(self.CONSUMER_URL)
+        entities = resp.result['consumers']
+        self.assertIsNotNone(entities)
+        self_url = ['http://localhost/v3', self.CONSUMER_URL]
+        self_url = ''.join(self_url)
+        self.assertEqual(self_url, resp.result['links']['self'])
+        self.assertValidListLinks(resp.result['links'])
+
+    def test_consumer_update(self):
+        consumer = self._create_single_consumer()
+        original_id = consumer['id']
+        original_description = consumer['description']
+        update_description = original_description + '_new'
+
+        update_ref = {'description': update_description}
+        update_resp = self.patch(self.CONSUMER_URL + '/%s' % original_id,
+                                 body={'consumer': update_ref})
+        consumer = update_resp.result['consumer']
+        self.assertEqual(update_description, consumer['description'])
+        self.assertEqual(original_id, consumer['id'])
+
+    def test_consumer_update_bad_secret(self):
+        consumer = self._create_single_consumer()
+        original_id = consumer['id']
+        update_ref = copy.deepcopy(consumer)
+        update_ref['description'] = uuid.uuid4().hex
+        update_ref['secret'] = uuid.uuid4().hex
+        self.patch(self.CONSUMER_URL + '/%s' % original_id,
+                   body={'consumer': update_ref},
+                   expected_status=400)
+
+    def test_consumer_update_bad_id(self):
+        consumer = self._create_single_consumer()
+        original_id = consumer['id']
+        original_description = consumer['description']
+        update_description = original_description + "_new"
+
+        update_ref = copy.deepcopy(consumer)
+        update_ref['description'] = update_description
+        update_ref['id'] = update_description
+        self.patch(self.CONSUMER_URL + '/%s' % original_id,
+                   body={'consumer': update_ref},
+                   expected_status=400)
+
+    def test_consumer_update_normalize_field(self):
+        # If a consumer is updated with a field that has ':' or '-' in its
+        # name, the name is normalized by converting those chars to '_'.
+        field1_name = 'some:weird-field'
+        field1_orig_value = uuid.uuid4().hex
+
+        extra_fields = {field1_name: field1_orig_value}
+        consumer = self._consumer_create(**extra_fields)
+        consumer_id = consumer['id']
+
+        field1_new_value = uuid.uuid4().hex
+
+        field2_name = 'weird:some-field'
+        field2_value = uuid.uuid4().hex
+
+        update_ref = {field1_name: field1_new_value,
+                      field2_name: field2_value}
+
+        update_resp = self.patch(self.CONSUMER_URL + '/%s' % consumer_id,
+                                 body={'consumer': update_ref})
+        consumer = update_resp.result['consumer']
+
+        normalized_field1_name = 'some_weird_field'
+        self.assertEqual(field1_new_value, consumer[normalized_field1_name])
+
+        normalized_field2_name = 'weird_some_field'
+        self.assertEqual(field2_value, consumer[normalized_field2_name])
+
+    def test_consumer_create_no_description(self):
+        resp = self.post(self.CONSUMER_URL, body={'consumer': {}})
+        consumer = resp.result['consumer']
+        consumer_id = consumer['id']
+        self.assertIsNone(consumer['description'])
+        self.assertIsNotNone(consumer_id)
+        self.assertIsNotNone(consumer['secret'])
+
+    def test_consumer_get_bad_id(self):
+        self.get(self.CONSUMER_URL + '/%(consumer_id)s'
+                 % {'consumer_id': uuid.uuid4().hex},
+                 expected_status=404)
+
+
+class OAuthFlowTests(OAuth1Tests):
+
+    def auth_plugin_config_override(self):
+        methods = ['password', 'token', 'oauth1']
+        method_classes = {
+            'password': 'keystone.auth.plugins.password.Password',
+            'token': 'keystone.auth.plugins.token.Token',
+            'oauth1': 'keystone.auth.plugins.oauth1.OAuth',
+        }
+        super(OAuthFlowTests, self).auth_plugin_config_override(
+            methods, **method_classes)
+
+    def test_oauth_flow(self):
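+        # Full three-legged flow: create a consumer, obtain a request
+        # token, authorize it with a role (yielding a verifier), exchange
+        # it for an access token, then use that to issue a regular
+        # Keystone token via the oauth1 auth method.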
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        self.consumer = {'key': consumer_id, 'secret': consumer_secret}
+        self.assertIsNotNone(self.consumer['secret'])
+
+        url, headers = self._create_request_token(self.consumer,
+                                                  self.project_id)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        request_key = credentials['oauth_token'][0]
+        request_secret = credentials['oauth_token_secret'][0]
+        self.request_token = oauth1.Token(request_key, request_secret)
+        self.assertIsNotNone(self.request_token.key)
+
+        url = self._authorize_request_token(request_key)
+        body = {'roles': [{'id': self.role_id}]}
+        resp = self.put(url, body=body, expected_status=200)
+        self.verifier = resp.result['token']['oauth_verifier']
+        self.assertTrue(all(i in core.VERIFIER_CHARS for i in self.verifier))
+        self.assertEqual(8, len(self.verifier))
+
+        self.request_token.set_verifier(self.verifier)
+        url, headers = self._create_access_token(self.consumer,
+                                                 self.request_token)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        access_key = credentials['oauth_token'][0]
+        access_secret = credentials['oauth_token_secret'][0]
+        self.access_token = oauth1.Token(access_key, access_secret)
+        self.assertIsNotNone(self.access_token.key)
+
+        url, headers, body = self._get_oauth_token(self.consumer,
+                                                   self.access_token)
+        content = self.post(url, headers=headers, body=body)
+        self.keystone_token_id = content.headers['X-Subject-Token']
+        self.keystone_token = content.result['token']
+        self.assertIsNotNone(self.keystone_token_id)
+
+
+class AccessTokenCRUDTests(OAuthFlowTests):
+    def test_delete_access_token_dne(self):
+        self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s'
+                    % {'user': self.user_id,
+                       'auth': uuid.uuid4().hex},
+                    expected_status=404)
+
+    def test_list_no_access_tokens(self):
+        resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
+                        % {'user_id': self.user_id})
+        entities = resp.result['access_tokens']
+        self.assertEqual([], entities)
+        self.assertValidListLinks(resp.result['links'])
+
+    def test_get_single_access_token(self):
+        self.test_oauth_flow()
+        url = '/users/%(user_id)s/OS-OAUTH1/access_tokens/%(key)s' % {
+              'user_id': self.user_id,
+              'key': self.access_token.key
+        }
+        resp = self.get(url)
+        entity = resp.result['access_token']
+        self.assertEqual(self.access_token.key, entity['id'])
+        self.assertEqual(self.consumer['key'], entity['consumer_id'])
+        self.assertEqual('http://localhost/v3' + url, entity['links']['self'])
+
+    def test_get_access_token_dne(self):
+        self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(key)s'
+                 % {'user_id': self.user_id,
+                    'key': uuid.uuid4().hex},
+                 expected_status=404)
+
+    def test_list_all_roles_in_access_token(self):
+        self.test_oauth_flow()
+        resp = self.get('/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles'
+                        % {'id': self.user_id,
+                           'key': self.access_token.key})
+        entities = resp.result['roles']
+        self.assertTrue(entities)
+        self.assertValidListLinks(resp.result['links'])
+
+    def test_get_role_in_access_token(self):
+        self.test_oauth_flow()
+        url = ('/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles/%(role)s'
+               % {'id': self.user_id, 'key': self.access_token.key,
+                  'role': self.role_id})
+        resp = self.get(url)
+        entity = resp.result['role']
+        self.assertEqual(self.role_id, entity['id'])
+
+    def test_get_role_in_access_token_dne(self):
+        self.test_oauth_flow()
+        url = ('/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles/%(role)s'
+               % {'id': self.user_id, 'key': self.access_token.key,
+                  'role': uuid.uuid4().hex})
+        self.get(url, expected_status=404)
+
+    def test_list_and_delete_access_tokens(self):
+        self.test_oauth_flow()
+        # The list of access tokens should be non-empty
+        resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
+                        % {'user_id': self.user_id})
+        entities = resp.result['access_tokens']
+        self.assertTrue(entities)
+        self.assertValidListLinks(resp.result['links'])
+
+        # Delete access_token
+        resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s'
+                           % {'user': self.user_id,
+                              'auth': self.access_token.key})
+        self.assertResponseStatus(resp, 204)
+
+        # The list of access tokens should now be empty
+        resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
+                        % {'user_id': self.user_id})
+        entities = resp.result['access_tokens']
+        self.assertEqual([], entities)
+        self.assertValidListLinks(resp.result['links'])
+
+
+class AuthTokenTests(OAuthFlowTests):
+
+    def test_keystone_token_is_valid(self):
+        self.test_oauth_flow()
+        headers = {'X-Subject-Token': self.keystone_token_id,
+                   'X-Auth-Token': self.keystone_token_id}
+        r = self.get('/auth/tokens', headers=headers)
+        self.assertValidTokenResponse(r, self.user)
+
+        # now verify the oauth section
+        oauth_section = r.result['token']['OS-OAUTH1']
+        self.assertEqual(self.access_token.key,
+                         oauth_section['access_token_id'])
+        self.assertEqual(self.consumer['key'], oauth_section['consumer_id'])
+
+        # verify the roles section
+        roles_list = r.result['token']['roles']
+        # we can just verify the 0th role since we are only assigning one role
+        self.assertEqual(self.role_id, roles_list[0]['id'])
+
+        # verify that the token can perform delegated tasks
+        ref = self.new_user_ref(domain_id=self.domain_id)
+        r = self.admin_request(path='/v3/users', headers=headers,
+                               method='POST', body={'user': ref})
+        self.assertValidUserResponse(r, ref)
+
+    def test_delete_access_token_also_revokes_token(self):
+        self.test_oauth_flow()
+
+        # Delete access token
+        resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s'
+                           % {'user': self.user_id,
+                              'auth': self.access_token.key})
+        self.assertResponseStatus(resp, 204)
+
+        # Check Keystone Token no longer exists
+        headers = {'X-Subject-Token': self.keystone_token_id,
+                   'X-Auth-Token': self.keystone_token_id}
+        self.get('/auth/tokens', headers=headers,
+                 expected_status=404)
+
+    def test_deleting_consumer_also_deletes_tokens(self):
+        self.test_oauth_flow()
+
+        # Delete consumer
+        consumer_id = self.consumer['key']
+        resp = self.delete('/OS-OAUTH1/consumers/%(consumer_id)s'
+                           % {'consumer_id': consumer_id})
+        self.assertResponseStatus(resp, 204)
+
+        # The list of access tokens should now be empty
+        resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
+                        % {'user_id': self.user_id})
+        entities = resp.result['access_tokens']
+        self.assertEqual([], entities)
+
+        # Check Keystone Token no longer exists
+        headers = {'X-Subject-Token': self.keystone_token_id,
+                   'X-Auth-Token': self.keystone_token_id}
+        self.head('/auth/tokens', headers=headers,
+                  expected_status=404)
+
+    def test_change_user_password_also_deletes_tokens(self):
+        self.test_oauth_flow()
+
+        # delegated keystone token exists
+        headers = {'X-Subject-Token': self.keystone_token_id,
+                   'X-Auth-Token': self.keystone_token_id}
+        r = self.get('/auth/tokens', headers=headers)
+        self.assertValidTokenResponse(r, self.user)
+
+        user = {'password': uuid.uuid4().hex}
+        r = self.patch('/users/%(user_id)s' % {
+            'user_id': self.user['id']},
+            body={'user': user})
+
+        headers = {'X-Subject-Token': self.keystone_token_id,
+                   'X-Auth-Token': self.keystone_token_id}
+        self.admin_request(path='/auth/tokens', headers=headers,
+                           method='GET', expected_status=404)
+
+    def test_deleting_project_also_invalidates_tokens(self):
+        self.test_oauth_flow()
+
+        # delegated keystone token exists
+        headers = {'X-Subject-Token': self.keystone_token_id,
+                   'X-Auth-Token': self.keystone_token_id}
+        r = self.get('/auth/tokens', headers=headers)
+        self.assertValidTokenResponse(r, self.user)
+
+        r = self.delete('/projects/%(project_id)s' % {
+            'project_id': self.project_id})
+
+        headers = {'X-Subject-Token': self.keystone_token_id,
+                   'X-Auth-Token': self.keystone_token_id}
+        self.admin_request(path='/auth/tokens', headers=headers,
+                           method='GET', expected_status=404)
+
+    def test_token_chaining_is_not_allowed(self):
+        self.test_oauth_flow()
+
+        # attempt to re-authenticate (token chain) with the given token
+        path = '/v3/auth/tokens/'
+        auth_data = self.build_authentication_request(
+            token=self.keystone_token_id)
+
+        self.admin_request(
+            path=path,
+            body=auth_data,
+            token=self.keystone_token_id,
+            method='POST',
+            expected_status=403)
+
+    def test_delete_keystone_tokens_by_consumer_id(self):
+        self.test_oauth_flow()
+        self.token_provider_api._persistence.get_token(self.keystone_token_id)
+        self.token_provider_api._persistence.delete_tokens(
+            self.user_id,
+            consumer_id=self.consumer['key'])
+        self.assertRaises(exception.TokenNotFound,
+                          self.token_provider_api._persistence.get_token,
+                          self.keystone_token_id)
+
+    def _create_trust_get_token(self):
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+        del ref['id']
+
+        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+        trust = self.assertValidTrustResponse(r)
+
+        auth_data = self.build_authentication_request(
+            user_id=self.user['id'],
+            password=self.user['password'],
+            trust_id=trust['id'])
+
+        return self.get_requested_token(auth_data)
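+
+    # For reference, build_authentication_request(..., trust_id=...)
+    # above yields, roughly, the standard v3 trust-scoped body (a
+    # sketch; the helper builds the exact payload):
+    #   {"auth": {"identity": {"methods": ["password"],
+    #                          "password": {"user": {...}}},
+    #             "scope": {"OS-TRUST:trust": {"id": trust_id}}}}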
+
+    def _approve_request_token_url(self):
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        self.consumer = {'key': consumer_id, 'secret': consumer_secret}
+        self.assertIsNotNone(self.consumer['secret'])
+
+        url, headers = self._create_request_token(self.consumer,
+                                                  self.project_id)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        request_key = credentials['oauth_token'][0]
+        request_secret = credentials['oauth_token_secret'][0]
+        self.request_token = oauth1.Token(request_key, request_secret)
+        self.assertIsNotNone(self.request_token.key)
+
+        url = self._authorize_request_token(request_key)
+
+        return url
+
+    def test_oauth_token_cannot_create_new_trust(self):
+        self.test_oauth_flow()
+        ref = self.new_trust_ref(
+            trustor_user_id=self.user_id,
+            trustee_user_id=self.user_id,
+            project_id=self.project_id,
+            impersonation=True,
+            expires=dict(minutes=1),
+            role_ids=[self.role_id])
+        del ref['id']
+
+        self.post('/OS-TRUST/trusts',
+                  body={'trust': ref},
+                  token=self.keystone_token_id,
+                  expected_status=403)
+
+    def test_oauth_token_cannot_authorize_request_token(self):
+        self.test_oauth_flow()
+        url = self._approve_request_token_url()
+        body = {'roles': [{'id': self.role_id}]}
+        self.put(url, body=body, token=self.keystone_token_id,
+                 expected_status=403)
+
+    def test_oauth_token_cannot_list_request_tokens(self):
+        self._set_policy({"identity:list_access_tokens": [],
+                          "identity:create_consumer": [],
+                          "identity:authorize_request_token": []})
+        self.test_oauth_flow()
+        url = '/users/%s/OS-OAUTH1/access_tokens' % self.user_id
+        self.get(url, token=self.keystone_token_id,
+                 expected_status=403)
+
+    def _set_policy(self, new_policy):
+        self.tempfile = self.useFixture(temporaryfile.SecureTempFile())
+        self.tmpfilename = self.tempfile.file_name
+        self.config_fixture.config(group='oslo_policy',
+                                   policy_file=self.tmpfilename)
+        with open(self.tmpfilename, "w") as policyfile:
+            policyfile.write(jsonutils.dumps(new_policy))
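+
+    # Example: self._set_policy({"identity:list_access_tokens": []})
+    # leaves that API unprotected, since with oslo.policy an empty
+    # rule list always passes the enforcement check.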
+
+    def test_trust_token_cannot_authorize_request_token(self):
+        trust_token = self._create_trust_get_token()
+        url = self._approve_request_token_url()
+        body = {'roles': [{'id': self.role_id}]}
+        self.put(url, body=body, token=trust_token, expected_status=403)
+
+    def test_trust_token_cannot_list_request_tokens(self):
+        self._set_policy({"identity:list_access_tokens": [],
+                          "identity:create_trust": []})
+        trust_token = self._create_trust_get_token()
+        url = '/users/%s/OS-OAUTH1/access_tokens' % self.user_id
+        self.get(url, token=trust_token, expected_status=403)
+
+
+class MaliciousOAuth1Tests(OAuth1Tests):
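+    """Negative OAuth1 tests.
+
+    Each test below corrupts a single input - the consumer secret,
+    request token key, consumer id, project id, verifier, authorizing
+    roles, token lifetime, or the Authorization header - and asserts
+    that the server rejects the request.
+    """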
+
+    def test_bad_consumer_secret(self):
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer = {'key': consumer_id, 'secret': uuid.uuid4().hex}
+        url, headers = self._create_request_token(consumer, self.project_id)
+        self.post(url, headers=headers, expected_status=401)
+
+    def test_bad_request_token_key(self):
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        consumer = {'key': consumer_id, 'secret': consumer_secret}
+        url, headers = self._create_request_token(consumer, self.project_id)
+        self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        url = self._authorize_request_token(uuid.uuid4().hex)
+        body = {'roles': [{'id': self.role_id}]}
+        self.put(url, body=body, expected_status=404)
+
+    def test_bad_consumer_id(self):
+        consumer = self._create_single_consumer()
+        consumer_id = uuid.uuid4().hex
+        consumer_secret = consumer['secret']
+        consumer = {'key': consumer_id, 'secret': consumer_secret}
+        url, headers = self._create_request_token(consumer, self.project_id)
+        self.post(url, headers=headers, expected_status=404)
+
+    def test_bad_requested_project_id(self):
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        consumer = {'key': consumer_id, 'secret': consumer_secret}
+        project_id = uuid.uuid4().hex
+        url, headers = self._create_request_token(consumer, project_id)
+        self.post(url, headers=headers, expected_status=404)
+
+    def test_bad_verifier(self):
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        consumer = {'key': consumer_id, 'secret': consumer_secret}
+
+        url, headers = self._create_request_token(consumer, self.project_id)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        request_key = credentials['oauth_token'][0]
+        request_secret = credentials['oauth_token_secret'][0]
+        request_token = oauth1.Token(request_key, request_secret)
+
+        url = self._authorize_request_token(request_key)
+        body = {'roles': [{'id': self.role_id}]}
+        resp = self.put(url, body=body, expected_status=200)
+        verifier = resp.result['token']['oauth_verifier']
+        self.assertIsNotNone(verifier)
+
+        request_token.set_verifier(uuid.uuid4().hex)
+        url, headers = self._create_access_token(consumer, request_token)
+        self.post(url, headers=headers, expected_status=401)
+
+    def test_bad_authorizing_roles(self):
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        consumer = {'key': consumer_id, 'secret': consumer_secret}
+
+        url, headers = self._create_request_token(consumer, self.project_id)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        request_key = credentials['oauth_token'][0]
+
+        self.assignment_api.remove_role_from_user_and_project(
+            self.user_id, self.project_id, self.role_id)
+        url = self._authorize_request_token(request_key)
+        body = {'roles': [{'id': self.role_id}]}
+        self.admin_request(path=url, method='PUT',
+                           body=body, expected_status=404)
+
+    def test_expired_authorizing_request_token(self):
+        self.config_fixture.config(group='oauth1', request_token_duration=-1)
+
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        self.consumer = {'key': consumer_id, 'secret': consumer_secret}
+        self.assertIsNotNone(self.consumer['key'])
+
+        url, headers = self._create_request_token(self.consumer,
+                                                  self.project_id)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        request_key = credentials['oauth_token'][0]
+        request_secret = credentials['oauth_token_secret'][0]
+        self.request_token = oauth1.Token(request_key, request_secret)
+        self.assertIsNotNone(self.request_token.key)
+
+        url = self._authorize_request_token(request_key)
+        body = {'roles': [{'id': self.role_id}]}
+        self.put(url, body=body, expected_status=401)
+
+    def test_expired_creating_keystone_token(self):
+        self.config_fixture.config(group='oauth1', access_token_duration=-1)
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        self.consumer = {'key': consumer_id, 'secret': consumer_secret}
+        self.assertIsNotNone(self.consumer['key'])
+
+        url, headers = self._create_request_token(self.consumer,
+                                                  self.project_id)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        request_key = credentials['oauth_token'][0]
+        request_secret = credentials['oauth_token_secret'][0]
+        self.request_token = oauth1.Token(request_key, request_secret)
+        self.assertIsNotNone(self.request_token.key)
+
+        url = self._authorize_request_token(request_key)
+        body = {'roles': [{'id': self.role_id}]}
+        resp = self.put(url, body=body, expected_status=200)
+        self.verifier = resp.result['token']['oauth_verifier']
+
+        self.request_token.set_verifier(self.verifier)
+        url, headers = self._create_access_token(self.consumer,
+                                                 self.request_token)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        access_key = credentials['oauth_token'][0]
+        access_secret = credentials['oauth_token_secret'][0]
+        self.access_token = oauth1.Token(access_key, access_secret)
+        self.assertIsNotNone(self.access_token.key)
+
+        url, headers, body = self._get_oauth_token(self.consumer,
+                                                   self.access_token)
+        self.post(url, headers=headers, body=body, expected_status=401)
+
+    def test_missing_oauth_headers(self):
+        endpoint = '/OS-OAUTH1/request_token'
+        client = oauth1.Client(uuid.uuid4().hex,
+                               client_secret=uuid.uuid4().hex,
+                               signature_method=oauth1.SIG_HMAC,
+                               callback_uri="oob")
+        headers = {'requested_project_id': uuid.uuid4().hex}
+        _url, headers, _body = client.sign(self.base_url + endpoint,
+                                           http_method='POST',
+                                           headers=headers)
+
+        # NOTE(stevemar): To simulate this error, we remove the Authorization
+        # header from the post request.
+        del headers['Authorization']
+        self.post(endpoint, headers=headers, expected_status=500)
+
+
+class OAuthNotificationTests(OAuth1Tests,
+                             test_notifications.BaseNotificationTest):
+
+    def test_create_consumer(self):
+        consumer_ref = self._create_single_consumer()
+        self._assert_notify_sent(consumer_ref['id'],
+                                 test_notifications.CREATED_OPERATION,
+                                 'OS-OAUTH1:consumer')
+        self._assert_last_audit(consumer_ref['id'],
+                                test_notifications.CREATED_OPERATION,
+                                'OS-OAUTH1:consumer',
+                                cadftaxonomy.SECURITY_ACCOUNT)
+
+    def test_update_consumer(self):
+        consumer_ref = self._create_single_consumer()
+        update_ref = {'consumer': {'description': uuid.uuid4().hex}}
+        self.oauth_api.update_consumer(consumer_ref['id'], update_ref)
+        self._assert_notify_sent(consumer_ref['id'],
+                                 test_notifications.UPDATED_OPERATION,
+                                 'OS-OAUTH1:consumer')
+        self._assert_last_audit(consumer_ref['id'],
+                                test_notifications.UPDATED_OPERATION,
+                                'OS-OAUTH1:consumer',
+                                cadftaxonomy.SECURITY_ACCOUNT)
+
+    def test_delete_consumer(self):
+        consumer_ref = self._create_single_consumer()
+        self.oauth_api.delete_consumer(consumer_ref['id'])
+        self._assert_notify_sent(consumer_ref['id'],
+                                 test_notifications.DELETED_OPERATION,
+                                 'OS-OAUTH1:consumer')
+        self._assert_last_audit(consumer_ref['id'],
+                                test_notifications.DELETED_OPERATION,
+                                'OS-OAUTH1:consumer',
+                                cadftaxonomy.SECURITY_ACCOUNT)
+
+    def test_oauth_flow_notifications(self):
+        """Test to ensure notifications are sent for oauth tokens
+
+        This test is very similar to test_oauth_flow, however
+        there are additional checks in this test for ensuring that
+        notifications for request token creation, and access token
+        creation/deletion are emitted.
+        """
+
+        consumer = self._create_single_consumer()
+        consumer_id = consumer['id']
+        consumer_secret = consumer['secret']
+        self.consumer = {'key': consumer_id, 'secret': consumer_secret}
+        self.assertIsNotNone(self.consumer['secret'])
+
+        url, headers = self._create_request_token(self.consumer,
+                                                  self.project_id)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        request_key = credentials['oauth_token'][0]
+        request_secret = credentials['oauth_token_secret'][0]
+        self.request_token = oauth1.Token(request_key, request_secret)
+        self.assertIsNotNone(self.request_token.key)
+
+        # Test to ensure the create request token notification is sent
+        self._assert_notify_sent(request_key,
+                                 test_notifications.CREATED_OPERATION,
+                                 'OS-OAUTH1:request_token')
+        self._assert_last_audit(request_key,
+                                test_notifications.CREATED_OPERATION,
+                                'OS-OAUTH1:request_token',
+                                cadftaxonomy.SECURITY_CREDENTIAL)
+
+        url = self._authorize_request_token(request_key)
+        body = {'roles': [{'id': self.role_id}]}
+        resp = self.put(url, body=body, expected_status=200)
+        self.verifier = resp.result['token']['oauth_verifier']
+        self.assertTrue(all(i in core.VERIFIER_CHARS for i in self.verifier))
+        self.assertEqual(8, len(self.verifier))
+
+        self.request_token.set_verifier(self.verifier)
+        url, headers = self._create_access_token(self.consumer,
+                                                 self.request_token)
+        content = self.post(
+            url, headers=headers,
+            response_content_type='application/x-www-urlformencoded')
+        credentials = urllib.parse.parse_qs(content.result)
+        access_key = credentials['oauth_token'][0]
+        access_secret = credentials['oauth_token_secret'][0]
+        self.access_token = oauth1.Token(access_key, access_secret)
+        self.assertIsNotNone(self.access_token.key)
+
+        # Test to ensure the create access token notification is sent
+        self._assert_notify_sent(access_key,
+                                 test_notifications.CREATED_OPERATION,
+                                 'OS-OAUTH1:access_token')
+        self._assert_last_audit(access_key,
+                                test_notifications.CREATED_OPERATION,
+                                'OS-OAUTH1:access_token',
+                                cadftaxonomy.SECURITY_CREDENTIAL)
+
+        resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s'
+                           % {'user': self.user_id,
+                              'auth': self.access_token.key})
+        self.assertResponseStatus(resp, 204)
+
+        # Test to ensure the delete access token notification is sent
+        self._assert_notify_sent(access_key,
+                                 test_notifications.DELETED_OPERATION,
+                                 'OS-OAUTH1:access_token')
+        self._assert_last_audit(access_key,
+                                test_notifications.DELETED_OPERATION,
+                                'OS-OAUTH1:access_token',
+                                cadftaxonomy.SECURITY_CREDENTIAL)
+
+
+class OAuthCADFNotificationTests(OAuthNotificationTests):
+
+    def setUp(self):
+        """Repeat the tests for CADF notifications """
+        super(OAuthCADFNotificationTests, self).setUp()
+        self.config_fixture.config(notification_format='cadf')
+
+
+class JsonHomeTests(OAuth1Tests, test_v3.JsonHomeTestMixin):
+    JSON_HOME_DATA = {
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/'
+        'rel/consumers': {
+            'href': '/OS-OAUTH1/consumers',
+        },
+    }
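+
+# JsonHomeTestMixin (from test_v3) supplies the actual test: it fetches
+# the JSON Home document from the API root and asserts that each
+# relationship in JSON_HOME_DATA appears with the expected href, so this
+# class needs no test methods of its own.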
diff --git a/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py b/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py
new file mode 100644 (file)
index 0000000..5710d97
--- /dev/null
@@ -0,0 +1,135 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo_utils import timeutils
+import six
+from testtools import matchers
+
+from keystone.contrib.revoke import model
+from keystone.tests.unit import test_v3
+from keystone.token import provider
+
+
+def _future_time_string():
+    expire_delta = datetime.timedelta(seconds=1000)
+    future_time = timeutils.utcnow() + expire_delta
+    return timeutils.isotime(future_time)
+
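+# The event list accepts an ISO 8601 'since' query parameter, e.g.
+# GET /OS-REVOKE/events?since=2013-02-27T18:30:59.999999Z; the helper
+# above builds such a timestamp 1000 seconds in the future.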
+
+class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
+    EXTENSION_NAME = 'revoke'
+    EXTENSION_TO_ADD = 'revoke_extension'
+
+    JSON_HOME_DATA = {
+        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-REVOKE/1.0/'
+        'rel/events': {
+            'href': '/OS-REVOKE/events',
+        },
+    }
+
+    def test_get_empty_list(self):
+        resp = self.get('/OS-REVOKE/events')
+        self.assertEqual([], resp.json_body['events'])
+
+    def _blank_event(self):
+        return {}
+
+    # The reported and recorded events will be identical except for
+    # 'issued_before', which is set when the event is recorded.
+    def assertReportedEventMatchesRecorded(self, event, sample, before_time):
+        after_time = timeutils.utcnow()
+        event_issued_before = timeutils.normalize_time(
+            timeutils.parse_isotime(event['issued_before']))
+        self.assertTrue(
+            before_time <= event_issued_before,
+            'invalid event issued_before time; %s is not later than %s.' % (
+                timeutils.isotime(event_issued_before, subsecond=True),
+                timeutils.isotime(before_time, subsecond=True)))
+        self.assertTrue(
+            event_issued_before <= after_time,
+            'invalid event issued_before time; %s is not earlier than %s.' % (
+                timeutils.isotime(event_issued_before, subsecond=True),
+                timeutils.isotime(after_time, subsecond=True)))
+        del event['issued_before']
+        self.assertEqual(sample, event)
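+
+    # A reported event looks like, e.g.,
+    # {'user_id': '<hex>', 'expires_at': '<isotime>',
+    #  'issued_before': '<isotime>'}; the samples built in the tests
+    # below omit 'issued_before', which the server fills in.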
+
+    def test_revoked_list_self_url(self):
+        revoked_list_url = '/OS-REVOKE/events'
+        resp = self.get(revoked_list_url)
+        links = resp.json_body['links']
+        self.assertThat(links['self'], matchers.EndsWith(revoked_list_url))
+
+    def test_revoked_token_in_list(self):
+        user_id = uuid.uuid4().hex
+        expires_at = provider.default_expire_time()
+        sample = self._blank_event()
+        sample['user_id'] = six.text_type(user_id)
+        sample['expires_at'] = six.text_type(timeutils.isotime(expires_at))
+        before_time = timeutils.utcnow()
+        self.revoke_api.revoke_by_expiration(user_id, expires_at)
+        resp = self.get('/OS-REVOKE/events')
+        events = resp.json_body['events']
+        self.assertEqual(1, len(events))
+        self.assertReportedEventMatchesRecorded(events[0], sample, before_time)
+
+    def test_disabled_project_in_list(self):
+        project_id = uuid.uuid4().hex
+        sample = dict()
+        sample['project_id'] = six.text_type(project_id)
+        before_time = timeutils.utcnow()
+        self.revoke_api.revoke(
+            model.RevokeEvent(project_id=project_id))
+
+        resp = self.get('/OS-REVOKE/events')
+        events = resp.json_body['events']
+        self.assertEqual(1, len(events))
+        self.assertReportedEventMatchesRecorded(events[0], sample, before_time)
+
+    def test_disabled_domain_in_list(self):
+        domain_id = uuid.uuid4().hex
+        sample = dict()
+        sample['domain_id'] = six.text_type(domain_id)
+        before_time = timeutils.utcnow()
+        self.revoke_api.revoke(
+            model.RevokeEvent(domain_id=domain_id))
+
+        resp = self.get('/OS-REVOKE/events')
+        events = resp.json_body['events']
+        self.assertEqual(1, len(events))
+        self.assertReportedEventMatchesRecorded(events[0], sample, before_time)
+
+    def test_list_since_invalid(self):
+        self.get('/OS-REVOKE/events?since=blah', expected_status=400)
+
+    def test_list_since_valid(self):
+        resp = self.get('/OS-REVOKE/events?since=2013-02-27T18:30:59.999999Z')
+        events = resp.json_body['events']
+        self.assertEqual(0, len(events))
+
+    def test_since_future_time_no_events(self):
+        domain_id = uuid.uuid4().hex
+        sample = dict()
+        sample['domain_id'] = six.text_type(domain_id)
+
+        self.revoke_api.revoke(
+            model.RevokeEvent(domain_id=domain_id))
+
+        resp = self.get('/OS-REVOKE/events')
+        events = resp.json_body['events']
+        self.assertEqual(1, len(events))
+
+        resp = self.get('/OS-REVOKE/events?since=%s' % _future_time_string())
+        events = resp.json_body['events']
+        self.assertEqual([], events)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_policy.py b/keystone-moon/keystone/tests/unit/test_v3_policy.py
new file mode 100644 (file)
index 0000000..538fc56
--- /dev/null
@@ -0,0 +1,68 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import uuid
+
+from keystone.tests.unit import test_v3
+
+
+class PolicyTestCase(test_v3.RestfulTestCase):
+    """Test policy CRUD."""
+
+    def setUp(self):
+        super(PolicyTestCase, self).setUp()
+        self.policy_id = uuid.uuid4().hex
+        self.policy = self.new_policy_ref()
+        self.policy['id'] = self.policy_id
+        self.policy_api.create_policy(
+            self.policy_id,
+            self.policy.copy())
+
+    # Policy CRUD tests
+
+    def test_create_policy(self):
+        """Call ``POST /policies``."""
+        ref = self.new_policy_ref()
+        r = self.post(
+            '/policies',
+            body={'policy': ref})
+        return self.assertValidPolicyResponse(r, ref)
+
+    def test_list_policies(self):
+        """Call ``GET /policies``."""
+        r = self.get('/policies')
+        self.assertValidPolicyListResponse(r, ref=self.policy)
+
+    def test_get_policy(self):
+        """Call ``GET /policies/{policy_id}``."""
+        r = self.get(
+            '/policies/%(policy_id)s' % {
+                'policy_id': self.policy_id})
+        self.assertValidPolicyResponse(r, self.policy)
+
+    def test_update_policy(self):
+        """Call ``PATCH /policies/{policy_id}``."""
+        policy = self.new_policy_ref()
+        policy['id'] = self.policy_id
+        r = self.patch(
+            '/policies/%(policy_id)s' % {
+                'policy_id': self.policy_id},
+            body={'policy': policy})
+        self.assertValidPolicyResponse(r, policy)
+
+    def test_delete_policy(self):
+        """Call ``DELETE /policies/{policy_id}``."""
+        self.delete(
+            '/policies/%(policy_id)s' % {
+                'policy_id': self.policy_id})
diff --git a/keystone-moon/keystone/tests/unit/test_v3_protection.py b/keystone-moon/keystone/tests/unit/test_v3_protection.py
new file mode 100644 (file)
index 0000000..2b2c96d
--- /dev/null
@@ -0,0 +1,1170 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+
+from keystone import exception
+from keystone.policy.backends import rules
+from keystone.tests import unit as tests
+from keystone.tests.unit.ksfixtures import temporaryfile
+from keystone.tests.unit import test_v3
+
+
+CONF = cfg.CONF
+DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
+
+
+class IdentityTestProtectedCase(test_v3.RestfulTestCase):
+    """Test policy enforcement on the v3 Identity API."""
+
+    def setUp(self):
+        """Setup for Identity Protection Test Cases.
+
+        As well as the usual housekeeping, create a set of domains,
+        users, roles and projects for the subsequent tests:
+
+        - Three domains: A, B & C. C is disabled.
+        - DomainA has user1, DomainB has user2 and user3
+        - DomainA has group1 and group2, DomainB has group3
+        - User1 has two roles on DomainA
+        - User2 has one role on DomainA
+
+        Remember that there will also be a fourth domain in existence,
+        the default domain.
+
+        """
+        # test_v3.RestfulTestCase normally loads its own sample data;
+        # load_sample_data() is overridden below so that the default
+        # data does not make the results of these tests harder to check.
+        super(IdentityTestProtectedCase, self).setUp()
+        self.tempfile = self.useFixture(temporaryfile.SecureTempFile())
+        self.tmpfilename = self.tempfile.file_name
+        self.config_fixture.config(group='oslo_policy',
+                                   policy_file=self.tmpfilename)
+
+        # A default auth request we can use - un-scoped user token
+        self.auth = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'])
+
+    def load_sample_data(self):
+        self._populate_default_domain()
+        # Start by creating a couple of domains
+        self.domainA = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainA['id'], self.domainA)
+        self.domainB = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainB['id'], self.domainB)
+        self.domainC = self.new_domain_ref()
+        self.domainC['enabled'] = False
+        self.resource_api.create_domain(self.domainC['id'], self.domainC)
+
+        # Now create some users, one in domainA and two of them in domainB
+        self.user1 = self.new_user_ref(domain_id=self.domainA['id'])
+        password = uuid.uuid4().hex
+        self.user1['password'] = password
+        self.user1 = self.identity_api.create_user(self.user1)
+        self.user1['password'] = password
+
+        self.user2 = self.new_user_ref(domain_id=self.domainB['id'])
+        password = uuid.uuid4().hex
+        self.user2['password'] = password
+        self.user2 = self.identity_api.create_user(self.user2)
+        self.user2['password'] = password
+
+        self.user3 = self.new_user_ref(domain_id=self.domainB['id'])
+        password = uuid.uuid4().hex
+        self.user3['password'] = password
+        self.user3 = self.identity_api.create_user(self.user3)
+        self.user3['password'] = password
+
+        self.group1 = self.new_group_ref(domain_id=self.domainA['id'])
+        self.group1 = self.identity_api.create_group(self.group1)
+
+        self.group2 = self.new_group_ref(domain_id=self.domainA['id'])
+        self.group2 = self.identity_api.create_group(self.group2)
+
+        self.group3 = self.new_group_ref(domain_id=self.domainB['id'])
+        self.group3 = self.identity_api.create_group(self.group3)
+
+        self.role = self.new_role_ref()
+        self.role_api.create_role(self.role['id'], self.role)
+        self.role1 = self.new_role_ref()
+        self.role_api.create_role(self.role1['id'], self.role1)
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user1['id'],
+                                         domain_id=self.domainA['id'])
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.user2['id'],
+                                         domain_id=self.domainA['id'])
+        self.assignment_api.create_grant(self.role1['id'],
+                                         user_id=self.user1['id'],
+                                         domain_id=self.domainA['id'])
+
+    def _get_id_list_from_ref_list(self, ref_list):
+        result_list = []
+        for x in ref_list:
+            result_list.append(x['id'])
+        return result_list
+
+    def _set_policy(self, new_policy):
+        with open(self.tmpfilename, "w") as policyfile:
+            policyfile.write(jsonutils.dumps(new_policy))
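+
+    # Each test below rewrites the policy file configured in setUp();
+    # oslo.policy reloads the file when it changes, so, e.g.,
+    # self._set_policy({"identity:list_users": []}) leaves the
+    # list_users API unprotected (an empty rule list always passes).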
+
+    def test_list_users_unprotected(self):
+        """GET /users (unprotected)
+
+        Test Plan:
+
+        - Update policy so api is unprotected
+        - Use an un-scoped token to make sure we can get back all
+          the users independent of domain
+
+        """
+        self._set_policy({"identity:list_users": []})
+        r = self.get('/users', auth=self.auth)
+        id_list = self._get_id_list_from_ref_list(r.result.get('users'))
+        self.assertIn(self.user1['id'], id_list)
+        self.assertIn(self.user2['id'], id_list)
+        self.assertIn(self.user3['id'], id_list)
+
+    def test_list_users_filtered_by_domain(self):
+        """GET /users?domain_id=mydomain (filtered)
+
+        Test Plan:
+
+        - Update policy so api is unprotected
+        - Use an un-scoped token to make sure we can filter the
+          users by domainB, getting back the 2 users in that domain
+
+        """
+        self._set_policy({"identity:list_users": []})
+        url_by_name = '/users?domain_id=%s' % self.domainB['id']
+        r = self.get(url_by_name, auth=self.auth)
+        # We should get back two users, those in DomainB
+        id_list = self._get_id_list_from_ref_list(r.result.get('users'))
+        self.assertIn(self.user2['id'], id_list)
+        self.assertIn(self.user3['id'], id_list)
+
+    def test_get_user_protected_match_id(self):
+        """GET /users/{id} (match payload)
+
+        Test Plan:
+
+        - Update policy to protect api by user_id
+        - List users with user_id of user1 as filter, to check that
+          this will correctly match user_id in the flattened
+          payload
+
+        """
+        # TODO(henry-nash, ayoung): It would be good to expand this
+        # test for further test flattening, e.g. protect on, say, an
+        # attribute of an object being created
+        new_policy = {"identity:get_user": [["user_id:%(user_id)s"]]}
+        self._set_policy(new_policy)
+        url_by_name = '/users/%s' % self.user1['id']
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertEqual(self.user1['id'], r.result['user']['id'])
+
+    def test_get_user_protected_match_target(self):
+        """GET /users/{id} (match target)
+
+        Test Plan:
+
+        - Update policy to protect api by domain_id
+        - Try and read a user who is in DomainB with a token scoped
+          to DomainA - this should fail
+        - Retry this for a user who is in DomainA, which should succeed.
+        - Finally, try getting a user that does not exist, which should
+          still return UserNotFound
+
+        """
+        new_policy = {'identity:get_user':
+                      [["domain_id:%(target.user.domain_id)s"]]}
+        self._set_policy(new_policy)
+        self.auth = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            domain_id=self.domainA['id'])
+        url_by_name = '/users/%s' % self.user2['id']
+        r = self.get(url_by_name, auth=self.auth,
+                     expected_status=exception.ForbiddenAction.code)
+
+        url_by_name = '/users/%s' % self.user1['id']
+        r = self.get(url_by_name, auth=self.auth)
+        self.assertEqual(self.user1['id'], r.result['user']['id'])
+
+        url_by_name = '/users/%s' % uuid.uuid4().hex
+        r = self.get(url_by_name, auth=self.auth,
+                     expected_status=exception.UserNotFound.code)
+
+    def test_revoke_grant_protected_match_target(self):
+        """DELETE /domains/{id}/users/{id}/roles/{id} (match target)
+
+        Test Plan:
+
+        - Update policy to protect api by domain_id of entities in
+          the grant
+        - Try and delete the existing grant that has a user who is
+          from a different domain - this should fail.
+        - Retry this for a user who is in DomainA, which should succeed.
+
+        """
+        new_policy = {'identity:revoke_grant':
+                      [["domain_id:%(target.user.domain_id)s"]]}
+        self._set_policy(new_policy)
+        collection_url = (
+            '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': self.domainA['id'],
+                'user_id': self.user2['id']})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role['id']}
+
+        self.auth = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            domain_id=self.domainA['id'])
+        self.delete(member_url, auth=self.auth,
+                    expected_status=exception.ForbiddenAction.code)
+
+        collection_url = (
+            '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+                'domain_id': self.domainA['id'],
+                'user_id': self.user1['id']})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': self.role1['id']}
+        self.delete(member_url, auth=self.auth)
+
+    def test_list_users_protected_by_domain(self):
+        """GET /users?domain_id=mydomain (protected)
+
+        Test Plan:
+
+        - Update policy to protect api by domain_id
+        - List users using a token scoped to domainA with a filter
+          specifying domainA - we should only get back the one user
+          that is in domainA.
+        - Try and read the users from domainB - this should fail since
+          we don't have a token scoped for domainB
+
+        """
+        new_policy = {"identity:list_users": ["domain_id:%(domain_id)s"]}
+        self._set_policy(new_policy)
+        self.auth = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            domain_id=self.domainA['id'])
+        url_by_name = '/users?domain_id=%s' % self.domainA['id']
+        r = self.get(url_by_name, auth=self.auth)
+        # We should only get back one user, the one in DomainA
+        id_list = self._get_id_list_from_ref_list(r.result.get('users'))
+        self.assertEqual(1, len(id_list))
+        self.assertIn(self.user1['id'], id_list)
+
+        # Now try for domainB, which should fail
+        url_by_name = '/users?domain_id=%s' % self.domainB['id']
+        r = self.get(url_by_name, auth=self.auth,
+                     expected_status=exception.ForbiddenAction.code)
+
+    def test_list_groups_protected_by_domain(self):
+        """GET /groups?domain_id=mydomain (protected)
+
+        Test Plan:
+
+        - Update policy to protect api by domain_id
+        - List groups using a token scoped to domainA and make sure
+          we only get back the two groups that are in domainA
+        - Try and read the groups from domainB - this should fail since
+          we don't have a token scoped for domainB
+
+        """
+        new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]}
+        self._set_policy(new_policy)
+        self.auth = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            domain_id=self.domainA['id'])
+        url_by_name = '/groups?domain_id=%s' % self.domainA['id']
+        r = self.get(url_by_name, auth=self.auth)
+        # We should only get back two groups, the ones in DomainA
+        id_list = self._get_id_list_from_ref_list(r.result.get('groups'))
+        self.assertEqual(2, len(id_list))
+        self.assertIn(self.group1['id'], id_list)
+        self.assertIn(self.group2['id'], id_list)
+
+        # Now try for domainB, which should fail
+        url_by_name = '/groups?domain_id=%s' % self.domainB['id']
+        r = self.get(url_by_name, auth=self.auth,
+                     expected_status=exception.ForbiddenAction.code)
+
+    def test_list_groups_protected_by_domain_and_filtered(self):
+        """GET /groups?domain_id=mydomain&name=myname (protected)
+
+        Test Plan:
+
+        - Update policy to protect api by domain_id
+        - List groups using a token scoped to domainA with a filter
+          specifying both domainA and the name of group.
+        - We should only get back the group in domainA that matches
+          the name
+
+        """
+        new_policy = {"identity:list_groups": ["domain_id:%(domain_id)s"]}
+        self._set_policy(new_policy)
+        self.auth = self.build_authentication_request(
+            user_id=self.user1['id'],
+            password=self.user1['password'],
+            domain_id=self.domainA['id'])
+        url_by_name = '/groups?domain_id=%s&name=%s' % (
+            self.domainA['id'], self.group2['name'])
+        r = self.get(url_by_name, auth=self.auth)
+        # We should only get back one group, the one in DomainA that matches
+        # the name supplied
+        id_list = self._get_id_list_from_ref_list(r.result.get('groups'))
+        self.assertEqual(1, len(id_list))
+        self.assertIn(self.group2['id'], id_list)
+
+
+class IdentityTestPolicySample(test_v3.RestfulTestCase):
+    """Test policy enforcement of the policy.json file."""
+
+    def load_sample_data(self):
+        self._populate_default_domain()
+
+        self.just_a_user = self.new_user_ref(
+            domain_id=CONF.identity.default_domain_id)
+        password = uuid.uuid4().hex
+        self.just_a_user['password'] = password
+        self.just_a_user = self.identity_api.create_user(self.just_a_user)
+        self.just_a_user['password'] = password
+
+        self.another_user = self.new_user_ref(
+            domain_id=CONF.identity.default_domain_id)
+        password = uuid.uuid4().hex
+        self.another_user['password'] = password
+        self.another_user = self.identity_api.create_user(self.another_user)
+        self.another_user['password'] = password
+
+        self.admin_user = self.new_user_ref(
+            domain_id=CONF.identity.default_domain_id)
+        password = uuid.uuid4().hex
+        self.admin_user['password'] = password
+        self.admin_user = self.identity_api.create_user(self.admin_user)
+        self.admin_user['password'] = password
+
+        self.role = self.new_role_ref()
+        self.role_api.create_role(self.role['id'], self.role)
+        self.admin_role = {'id': uuid.uuid4().hex, 'name': 'admin'}
+        self.role_api.create_role(self.admin_role['id'], self.admin_role)
+
+        # Create and assign roles to the project
+        self.project = self.new_project_ref(
+            domain_id=CONF.identity.default_domain_id)
+        self.resource_api.create_project(self.project['id'], self.project)
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.just_a_user['id'],
+                                         project_id=self.project['id'])
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.another_user['id'],
+                                         project_id=self.project['id'])
+        self.assignment_api.create_grant(self.admin_role['id'],
+                                         user_id=self.admin_user['id'],
+                                         project_id=self.project['id'])
+
+    def test_user_validate_same_token(self):
+        # Given a non-admin user token, the token can be used to validate
+        # itself.
+        # This is GET /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
+        # FIXME(blk-u): This test fails; a user can't validate their
+        # own token. See bug 1421825.
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token = self.get_requested_token(auth)
+
+        # FIXME(blk-u): remove expected_status=403.
+        self.get('/auth/tokens', token=token,
+                 headers={'X-Subject-Token': token}, expected_status=403)
+
+    def test_user_validate_user_token(self):
+        # A user can validate one of their own tokens.
+        # This is GET /v3/auth/tokens
+        # FIXME(blk-u): This test fails; a user can't validate their
+        # own token. See bug 1421825.
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token1 = self.get_requested_token(auth)
+        token2 = self.get_requested_token(auth)
+
+        # FIXME(blk-u): remove expected_status=403.
+        self.get('/auth/tokens', token=token1,
+                 headers={'X-Subject-Token': token2}, expected_status=403)
+
+    def test_user_validate_other_user_token_rejected(self):
+        # A user cannot validate another user's token.
+        # This is GET /v3/auth/tokens
+
+        user1_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user1_token = self.get_requested_token(user1_auth)
+
+        user2_auth = self.build_authentication_request(
+            user_id=self.another_user['id'],
+            password=self.another_user['password'])
+        user2_token = self.get_requested_token(user2_auth)
+
+        self.get('/auth/tokens', token=user1_token,
+                 headers={'X-Subject-Token': user2_token}, expected_status=403)
+
+    def test_admin_validate_user_token(self):
+        # An admin can validate a user's token.
+        # This is GET /v3/auth/tokens
+
+        admin_auth = self.build_authentication_request(
+            user_id=self.admin_user['id'],
+            password=self.admin_user['password'],
+            project_id=self.project['id'])
+        admin_token = self.get_requested_token(admin_auth)
+
+        user_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user_token = self.get_requested_token(user_auth)
+
+        self.get('/auth/tokens', token=admin_token,
+                 headers={'X-Subject-Token': user_token})
+
+    def test_user_check_same_token(self):
+        # Given a non-admin user token, the token can be used to check
+        # itself.
+        # This is HEAD /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
+        # FIXME(blk-u): This test fails; a user can't check the same
+        # token. See bug 1421825.
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token = self.get_requested_token(auth)
+
+        # FIXME(blk-u): change to expected_status=200
+        self.head('/auth/tokens', token=token,
+                  headers={'X-Subject-Token': token}, expected_status=403)
+
+    def test_user_check_user_token(self):
+        # A user can check one of their own tokens.
+        # This is HEAD /v3/auth/tokens
+        # FIXME(blk-u): This test fails; a user can't check the same
+        # token. See bug 1421825.
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token1 = self.get_requested_token(auth)
+        token2 = self.get_requested_token(auth)
+
+        # FIXME(blk-u): change to expected_status=200
+        self.head('/auth/tokens', token=token1,
+                  headers={'X-Subject-Token': token2}, expected_status=403)
+
+    def test_user_check_other_user_token_rejected(self):
+        # A user cannot check another user's token.
+        # This is HEAD /v3/auth/tokens
+
+        user1_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user1_token = self.get_requested_token(user1_auth)
+
+        user2_auth = self.build_authentication_request(
+            user_id=self.another_user['id'],
+            password=self.another_user['password'])
+        user2_token = self.get_requested_token(user2_auth)
+
+        self.head('/auth/tokens', token=user1_token,
+                  headers={'X-Subject-Token': user2_token},
+                  expected_status=403)
+
+    def test_admin_check_user_token(self):
+        # An admin can check a user's token.
+        # This is HEAD /v3/auth/tokens
+
+        admin_auth = self.build_authentication_request(
+            user_id=self.admin_user['id'],
+            password=self.admin_user['password'],
+            project_id=self.project['id'])
+        admin_token = self.get_requested_token(admin_auth)
+
+        user_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user_token = self.get_requested_token(user_auth)
+
+        self.head('/auth/tokens', token=admin_token,
+                  headers={'X-Subject-Token': user_token}, expected_status=200)
+
+    def test_user_revoke_same_token(self):
+        # Given a non-admin user token, the token can be used to revoke
+        # itself.
+        # This is DELETE /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
+        # FIXME(blk-u): This test fails, a user can't revoke the same token,
+        # see bug 1421825.
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token = self.get_requested_token(auth)
+
+        # FIXME(blk-u): remove expected_status=403
+        self.delete('/auth/tokens', token=token,
+                    headers={'X-Subject-Token': token}, expected_status=403)
+
+    def test_user_revoke_user_token(self):
+        # A user can revoke one of their own tokens.
+        # This is DELETE /v3/auth/tokens
+        # FIXME(blk-u): This test fails, a user can't revoke the same token,
+        # see bug 1421825.
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token1 = self.get_requested_token(auth)
+        token2 = self.get_requested_token(auth)
+
+        # FIXME(blk-u): remove expected_status=403
+        self.delete('/auth/tokens', token=token1,
+                    headers={'X-Subject-Token': token2}, expected_status=403)
+
+    def test_user_revoke_other_user_token_rejected(self):
+        # A user cannot revoke another user's token.
+        # This is DELETE /v3/auth/tokens
+
+        user1_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user1_token = self.get_requested_token(user1_auth)
+
+        user2_auth = self.build_authentication_request(
+            user_id=self.another_user['id'],
+            password=self.another_user['password'])
+        user2_token = self.get_requested_token(user2_auth)
+
+        self.delete('/auth/tokens', token=user1_token,
+                    headers={'X-Subject-Token': user2_token},
+                    expected_status=403)
+
+    def test_admin_revoke_user_token(self):
+        # An admin can revoke a user's token.
+        # This is DELETE /v3/auth/tokens
+
+        admin_auth = self.build_authentication_request(
+            user_id=self.admin_user['id'],
+            password=self.admin_user['password'],
+            project_id=self.project['id'])
+        admin_token = self.get_requested_token(admin_auth)
+
+        user_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user_token = self.get_requested_token(user_auth)
+
+        self.delete('/auth/tokens', token=admin_token,
+                    headers={'X-Subject-Token': user_token})
+
+
+class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase):
+    """Test policy enforcement of the sample v3 cloud policy file."""
+
+    def setUp(self):
+        """Setup for v3 Cloud Policy Sample Test Cases.
+
+        The following data is created:
+
+        - Three domains: domainA, domainB and admin_domain
+        - One project, whose name is 'project'
+        - domainA has three users: domain_admin_user, project_admin_user and
+          just_a_user:
+
+          - domain_admin_user has role 'admin' on domainA,
+          - project_admin_user has role 'admin' on the project,
+          - just_a_user has a non-admin role on both domainA and the project.
+        - admin_domain has user cloud_admin_user, with an 'admin' role
+          on admin_domain.
+
+        We test various API protection rules from the cloud sample policy
+        file to make sure the sample is valid and that we correctly enforce it.
+
+        """
+        # Ensure that test_v3.RestfulTestCase doesn't load its own
+        # sample data, which would make checking the results of our
+        # tests harder
+        super(IdentityTestv3CloudPolicySample, self).setUp()
+
+        # Finally, switch to the v3 sample policy file
+        self.addCleanup(rules.reset)
+        rules.reset()
+        self.config_fixture.config(
+            group='oslo_policy',
+            policy_file=tests.dirs.etc('policy.v3cloudsample.json'))
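+
+        # For orientation, the sample file keys its checks on rules of
+        # roughly this shape (illustrative, not a verbatim quote), which
+        # is why load_sample_data hard-codes the 'admin_domain_id' id:
+        #
+        #   "cloud_admin": "rule:admin_required and domain_id:admin_domain_id"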
+
+    def load_sample_data(self):
+        # Start by creating our domains
+        self._populate_default_domain()
+        self.domainA = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainA['id'], self.domainA)
+        self.domainB = self.new_domain_ref()
+        self.resource_api.create_domain(self.domainB['id'], self.domainB)
+        self.admin_domain = {'id': 'admin_domain_id', 'name': 'Admin_domain'}
+        self.resource_api.create_domain(self.admin_domain['id'],
+                                        self.admin_domain)
+
+        # And our users
+        self.cloud_admin_user = self.new_user_ref(
+            domain_id=self.admin_domain['id'])
+        password = uuid.uuid4().hex
+        self.cloud_admin_user['password'] = password
+        self.cloud_admin_user = (
+            self.identity_api.create_user(self.cloud_admin_user))
+        self.cloud_admin_user['password'] = password
+        self.just_a_user = self.new_user_ref(domain_id=self.domainA['id'])
+        password = uuid.uuid4().hex
+        self.just_a_user['password'] = password
+        self.just_a_user = self.identity_api.create_user(self.just_a_user)
+        self.just_a_user['password'] = password
+        self.domain_admin_user = self.new_user_ref(
+            domain_id=self.domainA['id'])
+        password = uuid.uuid4().hex
+        self.domain_admin_user['password'] = password
+        self.domain_admin_user = (
+            self.identity_api.create_user(self.domain_admin_user))
+        self.domain_admin_user['password'] = password
+        self.project_admin_user = self.new_user_ref(
+            domain_id=self.domainA['id'])
+        password = uuid.uuid4().hex
+        self.project_admin_user['password'] = password
+        self.project_admin_user = (
+            self.identity_api.create_user(self.project_admin_user))
+        self.project_admin_user['password'] = password
+
+        # The admin role and another plain role
+        self.admin_role = {'id': uuid.uuid4().hex, 'name': 'admin'}
+        self.role_api.create_role(self.admin_role['id'], self.admin_role)
+        self.role = self.new_role_ref()
+        self.role_api.create_role(self.role['id'], self.role)
+
+        # The cloud admin just gets the admin role
+        self.assignment_api.create_grant(self.admin_role['id'],
+                                         user_id=self.cloud_admin_user['id'],
+                                         domain_id=self.admin_domain['id'])
+
+        # Assign roles to the domain
+        self.assignment_api.create_grant(self.admin_role['id'],
+                                         user_id=self.domain_admin_user['id'],
+                                         domain_id=self.domainA['id'])
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.just_a_user['id'],
+                                         domain_id=self.domainA['id'])
+
+        # Create and assign roles to the project
+        self.project = self.new_project_ref(domain_id=self.domainA['id'])
+        self.resource_api.create_project(self.project['id'], self.project)
+        self.assignment_api.create_grant(self.admin_role['id'],
+                                         user_id=self.project_admin_user['id'],
+                                         project_id=self.project['id'])
+        self.assignment_api.create_grant(self.role['id'],
+                                         user_id=self.just_a_user['id'],
+                                         project_id=self.project['id'])
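+
+        # Net result, mirroring the class docstring:
+        #   cloud_admin_user   -> 'admin' role on admin_domain
+        #   domain_admin_user  -> 'admin' role on domainA
+        #   project_admin_user -> 'admin' role on the project
+        #   just_a_user        -> the plain role on domainA and the project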
+
+    def _stati(self, expected_status):
+        # Return the expected status codes for calls that return data,
+        # create data, and return no data; a specified status, if given,
+        # overrides all three of the normal values.
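+        # For example (a sketch of the two cases):
+        #   self._stati(None) -> (200, 201, 204)
+        #   self._stati(403)  -> (403, 403, 403)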
+        if expected_status is None:
+            return (200, 201, 204)
+        else:
+            return (expected_status, expected_status, expected_status)
+
+    def _test_user_management(self, domain_id, expected=None):
+        status_OK, status_created, status_no_data = self._stati(expected)
+        entity_url = '/users/%s' % self.just_a_user['id']
+        list_url = '/users?domain_id=%s' % domain_id
+
+        self.get(entity_url, auth=self.auth,
+                 expected_status=status_OK)
+        self.get(list_url, auth=self.auth,
+                 expected_status=status_OK)
+        user = {'description': 'Updated'}
+        self.patch(entity_url, auth=self.auth, body={'user': user},
+                   expected_status=status_OK)
+        self.delete(entity_url, auth=self.auth,
+                    expected_status=status_no_data)
+
+        user_ref = self.new_user_ref(domain_id=domain_id)
+        self.post('/users', auth=self.auth, body={'user': user_ref},
+                  expected_status=status_created)
+
+    def _test_project_management(self, domain_id, expected=None):
+        status_OK, status_created, status_no_data = self._stati(expected)
+        entity_url = '/projects/%s' % self.project['id']
+        list_url = '/projects?domain_id=%s' % domain_id
+
+        self.get(entity_url, auth=self.auth,
+                 expected_status=status_OK)
+        self.get(list_url, auth=self.auth,
+                 expected_status=status_OK)
+        project = {'description': 'Updated'}
+        self.patch(entity_url, auth=self.auth, body={'project': project},
+                   expected_status=status_OK)
+        self.delete(entity_url, auth=self.auth,
+                    expected_status=status_no_data)
+
+        proj_ref = self.new_project_ref(domain_id=domain_id)
+        self.post('/projects', auth=self.auth, body={'project': proj_ref},
+                  expected_status=status_created)
+
+    def _test_domain_management(self, expected=None):
+        status_OK, status_created, status_no_data = self._stati(expected)
+        entity_url = '/domains/%s' % self.domainB['id']
+        list_url = '/domains'
+
+        self.get(entity_url, auth=self.auth,
+                 expected_status=status_OK)
+        self.get(list_url, auth=self.auth,
+                 expected_status=status_OK)
+        domain = {'description': 'Updated', 'enabled': False}
+        self.patch(entity_url, auth=self.auth, body={'domain': domain},
+                   expected_status=status_OK)
+        self.delete(entity_url, auth=self.auth,
+                    expected_status=status_no_data)
+
+        domain_ref = self.new_domain_ref()
+        self.post('/domains', auth=self.auth, body={'domain': domain_ref},
+                  expected_status=status_created)
+
+    def _test_grants(self, target, entity_id, expected=None):
+        status_OK, status_created, status_no_data = self._stati(expected)
+        a_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+        self.role_api.create_role(a_role['id'], a_role)
+
+        collection_url = (
+            '/%(target)s/%(target_id)s/users/%(user_id)s/roles' % {
+                'target': target,
+                'target_id': entity_id,
+                'user_id': self.just_a_user['id']})
+        member_url = '%(collection_url)s/%(role_id)s' % {
+            'collection_url': collection_url,
+            'role_id': a_role['id']}
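+
+        # Spelled out, member_url expands to one of (illustration only):
+        #   /domains/<domain_id>/users/<user_id>/roles/<role_id>
+        #   /projects/<project_id>/users/<user_id>/roles/<role_id>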
+
+        self.put(member_url, auth=self.auth,
+                 expected_status=status_no_data)
+        self.head(member_url, auth=self.auth,
+                  expected_status=status_no_data)
+        self.get(collection_url, auth=self.auth,
+                 expected_status=status_OK)
+        self.delete(member_url, auth=self.auth,
+                    expected_status=status_no_data)
+
+    def test_user_management(self):
+        # First, authenticate with a user that does not have the domain
+        # admin role - shouldn't be able to do much.
+        self.auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'],
+            domain_id=self.domainA['id'])
+
+        self._test_user_management(
+            self.domainA['id'], expected=exception.ForbiddenAction.code)
+
+        # Now, authenticate with a user that does have the domain admin role
+        self.auth = self.build_authentication_request(
+            user_id=self.domain_admin_user['id'],
+            password=self.domain_admin_user['password'],
+            domain_id=self.domainA['id'])
+
+        self._test_user_management(self.domainA['id'])
+
+    def test_user_management_by_cloud_admin(self):
+        # Test user management as a cloud admin. This user should
+        # be able to manage users in any domain.
+        self.auth = self.build_authentication_request(
+            user_id=self.cloud_admin_user['id'],
+            password=self.cloud_admin_user['password'],
+            domain_id=self.admin_domain['id'])
+
+        self._test_user_management(self.domainA['id'])
+
+    def test_project_management(self):
+        # First, authenticate with a user that does not have the project
+        # admin role - shouldn't be able to do much.
+        self.auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'],
+            domain_id=self.domainA['id'])
+
+        self._test_project_management(
+            self.domainA['id'], expected=exception.ForbiddenAction.code)
+
+        # ...but should still be able to list projects of which they are
+        # a member
+        url = '/users/%s/projects' % self.just_a_user['id']
+        self.get(url, auth=self.auth)
+
+        # Now, authenticate with a user that does have the domain admin role
+        self.auth = self.build_authentication_request(
+            user_id=self.domain_admin_user['id'],
+            password=self.domain_admin_user['password'],
+            domain_id=self.domainA['id'])
+
+        self._test_project_management(self.domainA['id'])
+
+    def test_project_management_by_cloud_admin(self):
+        self.auth = self.build_authentication_request(
+            user_id=self.cloud_admin_user['id'],
+            password=self.cloud_admin_user['password'],
+            domain_id=self.admin_domain['id'])
+
+        # Check that the cloud admin can operate on a domain
+        # other than its own.
+        self._test_project_management(self.domainA['id'])
+
+    def test_domain_grants(self):
+        self.auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'],
+            domain_id=self.domainA['id'])
+
+        self._test_grants('domains', self.domainA['id'],
+                          expected=exception.ForbiddenAction.code)
+
+        # Now, authenticate with a user that does have the domain admin role
+        self.auth = self.build_authentication_request(
+            user_id=self.domain_admin_user['id'],
+            password=self.domain_admin_user['password'],
+            domain_id=self.domainA['id'])
+
+        self._test_grants('domains', self.domainA['id'])
+
+        # Check that with such a token we cannot modify grants on a
+        # different domain
+        self._test_grants('domains', self.domainB['id'],
+                          expected=exception.ForbiddenAction.code)
+
+    def test_domain_grants_by_cloud_admin(self):
+        # Test domain grants with a cloud admin. This user should be
+        # able to manage roles on any domain.
+        self.auth = self.build_authentication_request(
+            user_id=self.cloud_admin_user['id'],
+            password=self.cloud_admin_user['password'],
+            domain_id=self.admin_domain['id'])
+
+        self._test_grants('domains', self.domainA['id'])
+
+    def test_project_grants(self):
+        self.auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'],
+            project_id=self.project['id'])
+
+        self._test_grants('projects', self.project['id'],
+                          expected=exception.ForbiddenAction.code)
+
+        # Now, authenticate with a user that does have the project
+        # admin role
+        self.auth = self.build_authentication_request(
+            user_id=self.project_admin_user['id'],
+            password=self.project_admin_user['password'],
+            project_id=self.project['id'])
+
+        self._test_grants('projects', self.project['id'])
+
+    def test_project_grants_by_domain_admin(self):
+        # Test project grants with a domain admin. This user should be
+        # able to manage roles on any project in its own domain.
+        self.auth = self.build_authentication_request(
+            user_id=self.domain_admin_user['id'],
+            password=self.domain_admin_user['password'],
+            domain_id=self.domainA['id'])
+
+        self._test_grants('projects', self.project['id'])
+
+    def test_cloud_admin(self):
+        self.auth = self.build_authentication_request(
+            user_id=self.domain_admin_user['id'],
+            password=self.domain_admin_user['password'],
+            domain_id=self.domainA['id'])
+
+        self._test_domain_management(
+            expected=exception.ForbiddenAction.code)
+
+        self.auth = self.build_authentication_request(
+            user_id=self.cloud_admin_user['id'],
+            password=self.cloud_admin_user['password'],
+            domain_id=self.admin_domain['id'])
+
+        self._test_domain_management()
+
+    def test_list_user_credentials(self):
+        self.credential_user = self.new_credential_ref(self.just_a_user['id'])
+        self.credential_api.create_credential(self.credential_user['id'],
+                                              self.credential_user)
+        self.credential_admin = self.new_credential_ref(
+            self.cloud_admin_user['id'])
+        self.credential_api.create_credential(self.credential_admin['id'],
+                                              self.credential_admin)
+
+        self.auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        url = '/credentials?user_id=%s' % self.just_a_user['id']
+        self.get(url, auth=self.auth)
+        url = '/credentials?user_id=%s' % self.cloud_admin_user['id']
+        self.get(url, auth=self.auth,
+                 expected_status=exception.ForbiddenAction.code)
+        url = '/credentials'
+        self.get(url, auth=self.auth,
+                 expected_status=exception.ForbiddenAction.code)
+
+    def test_get_and_delete_ec2_credentials(self):
+        """Tests getting and deleting ec2 credentials through the ec2 API."""
+        another_user = self.new_user_ref(domain_id=self.domainA['id'])
+        password = another_user['password']
+        another_user = self.identity_api.create_user(another_user)
+
+        # create a credential for just_a_user
+        just_user_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'],
+            project_id=self.project['id'])
+        url = '/users/%s/credentials/OS-EC2' % self.just_a_user['id']
+        r = self.post(url, body={'tenant_id': self.project['id']},
+                      auth=just_user_auth)
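+        # The response carries an 'access' key that becomes part of the
+        # member URL, /users/{user_id}/credentials/OS-EC2/{access}, as
+        # exercised below.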
+
+        # another normal user can't get the credential
+        another_user_auth = self.build_authentication_request(
+            user_id=another_user['id'],
+            password=password)
+        another_user_url = '/users/%s/credentials/OS-EC2/%s' % (
+            another_user['id'], r.result['credential']['access'])
+        self.get(another_user_url, auth=another_user_auth,
+                 expected_status=exception.ForbiddenAction.code)
+
+        # the owner can get the credential
+        just_user_url = '/users/%s/credentials/OS-EC2/%s' % (
+            self.just_a_user['id'], r.result['credential']['access'])
+        self.get(just_user_url, auth=just_user_auth)
+
+        # another normal user can't delete the credential
+        self.delete(another_user_url, auth=another_user_auth,
+                    expected_status=exception.ForbiddenAction.code)
+
+        # the owner can delete the credential
+        self.delete(just_user_url, auth=just_user_auth)
+
+    def test_user_validate_same_token(self):
+        # Given a non-admin user token, the token can be used to validate
+        # itself.
+        # This is GET /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
+        # FIXME(blk-u): This test fails, a user can't validate their own token,
+        # see bug 1421825.
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token = self.get_requested_token(auth)
+
+        # FIXME(blk-u): remove expected_status=403.
+        self.get('/auth/tokens', token=token,
+                 headers={'X-Subject-Token': token}, expected_status=403)
+
+    def test_user_validate_user_token(self):
+        # A user can validate one of their own tokens.
+        # This is GET /v3/auth/tokens
+        # FIXME(blk-u): This test fails, a user can't validate their own token,
+        # see bug 1421825.
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token1 = self.get_requested_token(auth)
+        token2 = self.get_requested_token(auth)
+
+        # FIXME(blk-u): remove expected_status=403.
+        self.get('/auth/tokens', token=token1,
+                 headers={'X-Subject-Token': token2}, expected_status=403)
+
+    def test_user_validate_other_user_token_rejected(self):
+        # A user cannot validate another user's token.
+        # This is GET /v3/auth/tokens
+
+        user1_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user1_token = self.get_requested_token(user1_auth)
+
+        user2_auth = self.build_authentication_request(
+            user_id=self.cloud_admin_user['id'],
+            password=self.cloud_admin_user['password'])
+        user2_token = self.get_requested_token(user2_auth)
+
+        self.get('/auth/tokens', token=user1_token,
+                 headers={'X-Subject-Token': user2_token}, expected_status=403)
+
+    def test_admin_validate_user_token(self):
+        # An admin can validate a user's token.
+        # This is GET /v3/auth/tokens
+
+        admin_auth = self.build_authentication_request(
+            user_id=self.cloud_admin_user['id'],
+            password=self.cloud_admin_user['password'],
+            domain_id=self.admin_domain['id'])
+        admin_token = self.get_requested_token(admin_auth)
+
+        user_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user_token = self.get_requested_token(user_auth)
+
+        self.get('/auth/tokens', token=admin_token,
+                 headers={'X-Subject-Token': user_token})
+
+    def test_user_check_same_token(self):
+        # Given a non-admin user token, the token can be used to check
+        # itself.
+        # This is HEAD /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token = self.get_requested_token(auth)
+
+        self.head('/auth/tokens', token=token,
+                  headers={'X-Subject-Token': token}, expected_status=200)
+
+    def test_user_check_user_token(self):
+        # A user can check one of their own tokens.
+        # This is HEAD /v3/auth/tokens
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token1 = self.get_requested_token(auth)
+        token2 = self.get_requested_token(auth)
+
+        self.head('/auth/tokens', token=token1,
+                  headers={'X-Subject-Token': token2}, expected_status=200)
+
+    def test_user_check_other_user_token_rejected(self):
+        # A user cannot check another user's token.
+        # This is HEAD /v3/auth/tokens
+
+        user1_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user1_token = self.get_requested_token(user1_auth)
+
+        user2_auth = self.build_authentication_request(
+            user_id=self.cloud_admin_user['id'],
+            password=self.cloud_admin_user['password'])
+        user2_token = self.get_requested_token(user2_auth)
+
+        self.head('/auth/tokens', token=user1_token,
+                  headers={'X-Subject-Token': user2_token},
+                  expected_status=403)
+
+    def test_admin_check_user_token(self):
+        # An admin can check a user's token.
+        # This is HEAD /v3/auth/tokens
+
+        admin_auth = self.build_authentication_request(
+            user_id=self.domain_admin_user['id'],
+            password=self.domain_admin_user['password'],
+            domain_id=self.domainA['id'])
+        admin_token = self.get_requested_token(admin_auth)
+
+        user_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user_token = self.get_requested_token(user_auth)
+
+        self.head('/auth/tokens', token=admin_token,
+                  headers={'X-Subject-Token': user_token}, expected_status=200)
+
+    def test_user_revoke_same_token(self):
+        # Given a non-admin user token, the token can be used to revoke
+        # itself.
+        # This is DELETE /v3/auth/tokens, with X-Auth-Token == X-Subject-Token
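+        # Roughly, the raw exchange is (success is 204 No Content):
+        #
+        #   DELETE /v3/auth/tokens HTTP/1.1
+        #   X-Auth-Token: <token>
+        #   X-Subject-Token: <token>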
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token = self.get_requested_token(auth)
+
+        self.delete('/auth/tokens', token=token,
+                    headers={'X-Subject-Token': token})
+
+    def test_user_revoke_user_token(self):
+        # A user can revoke one of their own tokens.
+        # This is DELETE /v3/auth/tokens
+
+        auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        token1 = self.get_requested_token(auth)
+        token2 = self.get_requested_token(auth)
+
+        self.delete('/auth/tokens', token=token1,
+                    headers={'X-Subject-Token': token2})
+
+    def test_user_revoke_other_user_token_rejected(self):
+        # A user cannot revoke another user's token.
+        # This is DELETE /v3/auth/tokens
+
+        user1_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user1_token = self.get_requested_token(user1_auth)
+
+        user2_auth = self.build_authentication_request(
+            user_id=self.cloud_admin_user['id'],
+            password=self.cloud_admin_user['password'])
+        user2_token = self.get_requested_token(user2_auth)
+
+        self.delete('/auth/tokens', token=user1_token,
+                    headers={'X-Subject-Token': user2_token},
+                    expected_status=403)
+
+    def test_admin_revoke_user_token(self):
+        # An admin can revoke a user's token.
+        # This is DELETE /v3/auth/tokens
+
+        admin_auth = self.build_authentication_request(
+            user_id=self.domain_admin_user['id'],
+            password=self.domain_admin_user['password'],
+            domain_id=self.domainA['id'])
+        admin_token = self.get_requested_token(admin_auth)
+
+        user_auth = self.build_authentication_request(
+            user_id=self.just_a_user['id'],
+            password=self.just_a_user['password'])
+        user_token = self.get_requested_token(user_auth)
+
+        self.delete('/auth/tokens', token=admin_token,
+                    headers={'X-Subject-Token': user_token})
diff --git a/keystone-moon/keystone/tests/unit/test_validation.py b/keystone-moon/keystone/tests/unit/test_validation.py
new file mode 100644 (file)
index 0000000..f83cabc
--- /dev/null
@@ -0,0 +1,1563 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import testtools
+
+from keystone.assignment import schema as assignment_schema
+from keystone.catalog import schema as catalog_schema
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+from keystone.common.validation import validators
+from keystone.contrib.endpoint_filter import schema as endpoint_filter_schema
+from keystone.contrib.federation import schema as federation_schema
+from keystone.credential import schema as credential_schema
+from keystone import exception
+from keystone.policy import schema as policy_schema
+from keystone.resource import schema as resource_schema
+from keystone.trust import schema as trust_schema
+
+"""Example model to validate create requests against. Assume that this is
+the only backend for the create and validate schemas. This is just an
+example to show how a backend can be used to construct a schema. In
+Keystone, schemas are built according to the Identity API and the backends
+available in Keystone. This example does not mean that all schemas in
+Keystone are strictly based on the SQL backends.
+
+class Entity(sql.ModelBase):
+    __tablename__ = 'entity'
+    attributes = ['id', 'name', 'domain_id', 'description']
+    id = sql.Column(sql.String(64), primary_key=True)
+    name = sql.Column(sql.String(255), nullable=False)
+    description = sql.Column(sql.Text(), nullable=True)
+    enabled = sql.Column(sql.Boolean, default=True, nullable=False)
+    url = sql.Column(sql.String(225), nullable=True)
+    email = sql.Column(sql.String(64), nullable=True)
+"""
+
+# Test schema to validate create requests against
+
+_entity_properties = {
+    'name': parameter_types.name,
+    'description': validation.nullable(parameter_types.description),
+    'enabled': parameter_types.boolean,
+    'url': validation.nullable(parameter_types.url),
+    'email': validation.nullable(parameter_types.email),
+    'id_string': validation.nullable(parameter_types.id_string)
+}
+
+entity_create = {
+    'type': 'object',
+    'properties': _entity_properties,
+    'required': ['name'],
+    'additionalProperties': True,
+}
+
+entity_update = {
+    'type': 'object',
+    'properties': _entity_properties,
+    'minProperties': 1,
+    'additionalProperties': True,
+}
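+
+# A brief usage sketch (mirroring how the tests below drive these
+# schemas; SchemaValidator is keystone's jsonschema-backed validator):
+#
+#   validators.SchemaValidator(entity_create).validate(
+#       {'name': 'some resource'})   # passes: only 'name' is required
+#   validators.SchemaValidator(entity_update).validate(
+#       {})                          # raises SchemaValidationError
+#                                    # (minProperties is 1)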
+
+_VALID_ENABLED_FORMATS = [True, False]
+
+_INVALID_ENABLED_FORMATS = ['some string', 1, 0, 'True', 'False']
+
+_VALID_URLS = ['https://example.com', 'http://EXAMPLE.com/v3',
+               'http://localhost', 'http://127.0.0.1:5000',
+               'http://1.1.1.1', 'http://255.255.255.255',
+               'http://[::1]', 'http://[::1]:35357',
+               'http://[1::8]', 'http://[fe80::8%25eth0]',
+               'http://[::1.2.3.4]', 'http://[2001:DB8::1.2.3.4]',
+               'http://[::a:1.2.3.4]', 'http://[a::b:1.2.3.4]',
+               'http://[1:2:3:4:5:6:7:8]', 'http://[1:2:3:4:5:6:1.2.3.4]',
+               'http://[abcd:efAB:CDEF:1111:9999::]']
+
+_INVALID_URLS = [False, 'this is not a URL', 1234, 'www.example.com',
+                 'localhost', 'http//something.com',
+                 'https//something.com']
+
+_VALID_FILTERS = [{'interface': 'admin'},
+                  {'region': 'US-WEST',
+                   'interface': 'internal'}]
+
+_INVALID_FILTERS = ['some string', 1, 0, True, False]
+
+
+class EntityValidationTestCase(testtools.TestCase):
+
+    def setUp(self):
+        super(EntityValidationTestCase, self).setUp()
+        self.resource_name = 'some resource name'
+        self.description = 'Some valid description'
+        self.valid_enabled = True
+        self.valid_url = 'http://example.com'
+        self.valid_email = 'joe@example.com'
+        self.create_schema_validator = validators.SchemaValidator(
+            entity_create)
+        self.update_schema_validator = validators.SchemaValidator(
+            entity_update)
+
+    def test_create_entity_with_all_valid_parameters_validates(self):
+        """Validate all parameter values against test schema."""
+        request_to_validate = {'name': self.resource_name,
+                               'description': self.description,
+                               'enabled': self.valid_enabled,
+                               'url': self.valid_url,
+                               'email': self.valid_email}
+        self.create_schema_validator.validate(request_to_validate)
+
+    def test_create_entity_with_only_required_valid_parameters_validates(self):
+        """Validate correct for only parameters values against test schema."""
+        request_to_validate = {'name': self.resource_name}
+        self.create_schema_validator.validate(request_to_validate)
+
+    def test_create_entity_with_name_too_long_raises_exception(self):
+        """Validate long names.
+
+        Validate that an exception is raised when validating a string of 255+
+        characters passed in as a name.
+        """
+        invalid_name = 'a' * 256
+        request_to_validate = {'name': invalid_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_schema_validator.validate,
+                          request_to_validate)
+
+    def test_create_entity_with_name_too_short_raises_exception(self):
+        """Validate short names.
+
+        Test that an exception is raised when passing a string of length
+        zero as a name parameter.
+        """
+        request_to_validate = {'name': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_schema_validator.validate,
+                          request_to_validate)
+
+    def test_create_entity_with_unicode_name_validates(self):
+        """Test that we successfully validate a unicode string."""
+        request_to_validate = {'name': u'αβγδ'}
+        self.create_schema_validator.validate(request_to_validate)
+
+    def test_create_entity_with_invalid_enabled_format_raises_exception(self):
+        """Validate invalid enabled formats.
+
+        Test that an exception is raised when passing invalid boolean-like
+        values as `enabled`.
+        """
+        for invalid_enabled in _INVALID_ENABLED_FORMATS:
+            request_to_validate = {'name': self.resource_name,
+                                   'enabled': invalid_enabled}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_schema_validator.validate,
+                              request_to_validate)
+
+    def test_create_entity_with_valid_enabled_formats_validates(self):
+        """Validate valid enabled formats.
+
+        Test that we have successful validation on boolean values for
+        `enabled`.
+        """
+        for valid_enabled in _VALID_ENABLED_FORMATS:
+            request_to_validate = {'name': self.resource_name,
+                                   'enabled': valid_enabled}
+            # Make sure validation doesn't raise a validation exception
+            self.create_schema_validator.validate(request_to_validate)
+
+    def test_create_entity_with_valid_urls_validates(self):
+        """Test that proper urls are successfully validated."""
+        for valid_url in _VALID_URLS:
+            request_to_validate = {'name': self.resource_name,
+                                   'url': valid_url}
+            self.create_schema_validator.validate(request_to_validate)
+
+    def test_create_entity_with_invalid_urls_fails(self):
+        """Test that an exception is raised when validating improper urls."""
+        for invalid_url in _INVALID_URLS:
+            request_to_validate = {'name': self.resource_name,
+                                   'url': invalid_url}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_schema_validator.validate,
+                              request_to_validate)
+
+    def test_create_entity_with_valid_email_validates(self):
+        """Validate email address
+
+        Test that we successfully validate properly formatted email
+        addresses.
+        """
+        request_to_validate = {'name': self.resource_name,
+                               'email': self.valid_email}
+        self.create_schema_validator.validate(request_to_validate)
+
+    def test_create_entity_with_invalid_email_fails(self):
+        """Validate invalid email address.
+
+        Test that an exception is raised when validating improperly
+        formatted email addresses.
+        """
+        request_to_validate = {'name': self.resource_name,
+                               'email': 'some invalid email value'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_schema_validator.validate,
+                          request_to_validate)
+
+    def test_create_entity_with_valid_id_strings(self):
+        """Validate acceptable id strings."""
+        valid_id_strings = [str(uuid.uuid4()), uuid.uuid4().hex, 'default']
+        for valid_id in valid_id_strings:
+            request_to_validate = {'name': self.resource_name,
+                                   'id_string': valid_id}
+            self.create_schema_validator.validate(request_to_validate)
+
+    def test_create_entity_with_invalid_id_strings(self):
+        """Exception raised when using invalid id strings."""
+        long_string = 'A' * 65
+        invalid_id_strings = ['', long_string, 'this,should,fail']
+        for invalid_id in invalid_id_strings:
+            request_to_validate = {'name': self.resource_name,
+                                   'id_string': invalid_id}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_schema_validator.validate,
+                              request_to_validate)
+
+    def test_create_entity_with_null_id_string(self):
+        """Validate that None is an acceptable optional string type."""
+        request_to_validate = {'name': self.resource_name,
+                               'id_string': None}
+        self.create_schema_validator.validate(request_to_validate)
+
+    def test_create_entity_with_null_string_succeeds(self):
+        """Exception raised when passing None on required id strings."""
+        request_to_validate = {'name': self.resource_name,
+                               'id_string': None}
+        self.create_schema_validator.validate(request_to_validate)
+
+    def test_update_entity_with_no_parameters_fails(self):
+        """At least one parameter needs to be present for an update."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_schema_validator.validate,
+                          request_to_validate)
+
+    def test_update_entity_with_all_parameters_valid_validates(self):
+        """Simulate updating an entity by ID."""
+        request_to_validate = {'name': self.resource_name,
+                               'description': self.description,
+                               'enabled': self.valid_enabled,
+                               'url': self.valid_url,
+                               'email': self.valid_email}
+        self.update_schema_validator.validate(request_to_validate)
+
+    def test_update_entity_with_a_valid_required_parameter_validates(self):
+        """Succeed if a valid required parameter is provided."""
+        request_to_validate = {'name': self.resource_name}
+        self.update_schema_validator.validate(request_to_validate)
+
+    def test_update_entity_with_invalid_required_parameter_fails(self):
+        """Fail if a provided required parameter is invalid."""
+        request_to_validate = {'name': 'a' * 256}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_schema_validator.validate,
+                          request_to_validate)
+
+    def test_update_entity_with_a_null_optional_parameter_validates(self):
+        """Optional parameters can be null to removed the value."""
+        request_to_validate = {'email': None}
+        self.update_schema_validator.validate(request_to_validate)
+
+    def test_update_entity_with_a_required_null_parameter_fails(self):
+        """The `name` parameter can't be null."""
+        request_to_validate = {'name': None}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_schema_validator.validate,
+                          request_to_validate)
+
+    def test_update_entity_with_a_valid_optional_parameter_validates(self):
+        """Succeeds with only a single valid optional parameter."""
+        request_to_validate = {'email': self.valid_email}
+        self.update_schema_validator.validate(request_to_validate)
+
+    def test_update_entity_with_invalid_optional_parameter_fails(self):
+        """Fails when an optional parameter is invalid."""
+        request_to_validate = {'email': 0}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_schema_validator.validate,
+                          request_to_validate)
+
+
+class ProjectValidationTestCase(testtools.TestCase):
+    """Test for V3 Project API validation."""
+
+    def setUp(self):
+        super(ProjectValidationTestCase, self).setUp()
+
+        self.project_name = 'My Project'
+
+        create = resource_schema.project_create
+        update = resource_schema.project_update
+        self.create_project_validator = validators.SchemaValidator(create)
+        self.update_project_validator = validators.SchemaValidator(update)
+
+    def test_validate_project_request(self):
+        """Test that we validate a project with `name` in request."""
+        request_to_validate = {'name': self.project_name}
+        self.create_project_validator.validate(request_to_validate)
+
+    def test_validate_project_request_without_name_fails(self):
+        """Validate project request fails without name."""
+        request_to_validate = {'enabled': True}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_project_validator.validate,
+                          request_to_validate)
+
+    def test_validate_project_request_with_enabled(self):
+        """Validate `enabled` as boolean-like values for projects."""
+        for valid_enabled in _VALID_ENABLED_FORMATS:
+            request_to_validate = {'name': self.project_name,
+                                   'enabled': valid_enabled}
+            self.create_project_validator.validate(request_to_validate)
+
+    def test_validate_project_request_with_invalid_enabled_fails(self):
+        """Exception is raised when `enabled` isn't a boolean-like value."""
+        for invalid_enabled in _INVALID_ENABLED_FORMATS:
+            request_to_validate = {'name': self.project_name,
+                                   'enabled': invalid_enabled}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_project_validator.validate,
+                              request_to_validate)
+
+    def test_validate_project_request_with_valid_description(self):
+        """Test that we validate `description` in create project requests."""
+        request_to_validate = {'name': self.project_name,
+                               'description': 'My Project'}
+        self.create_project_validator.validate(request_to_validate)
+
+    def test_validate_project_request_with_invalid_description_fails(self):
+        """Exception is raised when `description` as a non-string value."""
+        request_to_validate = {'name': self.project_name,
+                               'description': False}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_project_validator.validate,
+                          request_to_validate)
+
+    def test_validate_project_request_with_name_too_long(self):
+        """Exception is raised when `name` is too long."""
+        long_project_name = 'a' * 65
+        request_to_validate = {'name': long_project_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_project_validator.validate,
+                          request_to_validate)
+
+    def test_validate_project_request_with_name_too_short(self):
+        """Exception raised when `name` is too short."""
+        request_to_validate = {'name': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_project_validator.validate,
+                          request_to_validate)
+
+    def test_validate_project_request_with_valid_parent_id(self):
+        """Test that we validate `parent_id` in create project requests."""
+        # parent_id is nullable
+        request_to_validate = {'name': self.project_name,
+                               'parent_id': None}
+        self.create_project_validator.validate(request_to_validate)
+        request_to_validate = {'name': self.project_name,
+                               'parent_id': uuid.uuid4().hex}
+        self.create_project_validator.validate(request_to_validate)
+
+    def test_validate_project_request_with_invalid_parent_id_fails(self):
+        """Exception is raised when `parent_id` as a non-id value."""
+        request_to_validate = {'name': self.project_name,
+                               'parent_id': False}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_project_validator.validate,
+                          request_to_validate)
+        request_to_validate = {'name': self.project_name,
+                               'parent_id': 'fake project'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_project_validator.validate,
+                          request_to_validate)
+
+    def test_validate_project_update_request(self):
+        """Test that we validate a project update request."""
+        request_to_validate = {'domain_id': uuid.uuid4().hex}
+        self.update_project_validator.validate(request_to_validate)
+
+    def test_validate_project_update_request_with_no_parameters_fails(self):
+        """Exception is raised when updating project without parameters."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_project_validator.validate,
+                          request_to_validate)
+
+    def test_validate_project_update_request_with_name_too_long_fails(self):
+        """Exception raised when updating a project with `name` too long."""
+        long_project_name = 'a' * 65
+        request_to_validate = {'name': long_project_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_project_validator.validate,
+                          request_to_validate)
+
+    def test_validate_project_update_request_with_name_too_short_fails(self):
+        """Exception raised when updating a project with `name` too short."""
+        request_to_validate = {'name': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_project_validator.validate,
+                          request_to_validate)
+
+    def test_validate_project_update_request_with_null_domain_id_fails(self):
+        request_to_validate = {'domain_id': None}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_project_validator.validate,
+                          request_to_validate)
+
+
+class DomainValidationTestCase(testtools.TestCase):
+    """Test for V3 Domain API validation."""
+
+    def setUp(self):
+        super(DomainValidationTestCase, self).setUp()
+
+        self.domain_name = 'My Domain'
+
+        create = resource_schema.domain_create
+        update = resource_schema.domain_update
+        self.create_domain_validator = validators.SchemaValidator(create)
+        self.update_domain_validator = validators.SchemaValidator(update)
+
+    def test_validate_domain_request(self):
+        """Make sure we successfully validate a create domain request."""
+        request_to_validate = {'name': self.domain_name}
+        self.create_domain_validator.validate(request_to_validate)
+
+    def test_validate_domain_request_without_name_fails(self):
+        """Make sure we raise an exception when `name` isn't included."""
+        request_to_validate = {'enabled': True}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_domain_validator.validate,
+                          request_to_validate)
+
+    def test_validate_domain_request_with_enabled(self):
+        """Validate `enabled` as boolean-like values for domains."""
+        for valid_enabled in _VALID_ENABLED_FORMATS:
+            request_to_validate = {'name': self.domain_name,
+                                   'enabled': valid_enabled}
+            self.create_domain_validator.validate(request_to_validate)
+
+    def test_validate_domain_request_with_invalid_enabled_fails(self):
+        """Exception is raised when `enabled` isn't a boolean-like value."""
+        for invalid_enabled in _INVALID_ENABLED_FORMATS:
+            request_to_validate = {'name': self.domain_name,
+                                   'enabled': invalid_enabled}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_domain_validator.validate,
+                              request_to_validate)
+
+    def test_validate_domain_request_with_valid_description(self):
+        """Test that we validate `description` in create domain requests."""
+        request_to_validate = {'name': self.domain_name,
+                               'description': 'My Domain'}
+        self.create_domain_validator.validate(request_to_validate)
+
+    def test_validate_domain_request_with_invalid_description_fails(self):
+        """Exception is raised when `description` is a non-string value."""
+        request_to_validate = {'name': self.domain_name,
+                               'description': False}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_domain_validator.validate,
+                          request_to_validate)
+
+    def test_validate_domain_request_with_name_too_long(self):
+        """Exception is raised when `name` is too long."""
+        long_domain_name = 'a' * 65
+        request_to_validate = {'name': long_domain_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_domain_validator.validate,
+                          request_to_validate)
+
+    def test_validate_domain_request_with_name_too_short(self):
+        """Exception raised when `name` is too short."""
+        request_to_validate = {'name': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_domain_validator.validate,
+                          request_to_validate)
+
+    def test_validate_domain_update_request(self):
+        """Test that we validate a domain update request."""
+        request_to_validate = {'domain_id': uuid.uuid4().hex}
+        self.update_domain_validator.validate(request_to_validate)
+
+    def test_validate_domain_update_request_with_no_parameters_fails(self):
+        """Exception is raised when updating a domain without parameters."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_domain_validator.validate,
+                          request_to_validate)
+
+    def test_validate_domain_update_request_with_name_too_long_fails(self):
+        """Exception raised when updating a domain with `name` too long."""
+        long_domain_name = 'a' * 65
+        request_to_validate = {'name': long_domain_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_domain_validator.validate,
+                          request_to_validate)
+
+    def test_validate_domain_update_request_with_name_too_short_fails(self):
+        """Exception raised when updating a domain with `name` too short."""
+        request_to_validate = {'name': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_domain_validator.validate,
+                          request_to_validate)
+
+
+class RoleValidationTestCase(testtools.TestCase):
+    """Test for V3 Role API validation."""
+
+    def setUp(self):
+        super(RoleValidationTestCase, self).setUp()
+
+        self.role_name = 'My Role'
+
+        create = assignment_schema.role_create
+        update = assignment_schema.role_update
+        self.create_role_validator = validators.SchemaValidator(create)
+        self.update_role_validator = validators.SchemaValidator(update)
+
+    def test_validate_role_request(self):
+        """Test we can successfully validate a create role request."""
+        request_to_validate = {'name': self.role_name}
+        self.create_role_validator.validate(request_to_validate)
+
+    def test_validate_role_create_without_name_raises_exception(self):
+        """Test that we raise an exception when `name` isn't included."""
+        request_to_validate = {'enabled': True}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_role_validator.validate,
+                          request_to_validate)
+
+    def test_validate_role_create_when_name_is_not_string_fails(self):
+        """Exception is raised on role create with a non-string `name`."""
+        request_to_validate = {'name': True}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_role_validator.validate,
+                          request_to_validate)
+        request_to_validate = {'name': 24}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_role_validator.validate,
+                          request_to_validate)
+
+    def test_validate_role_update_request(self):
+        """Test that we validate a role update request."""
+        request_to_validate = {'name': 'My New Role'}
+        self.update_role_validator.validate(request_to_validate)
+
+    def test_validate_role_update_with_invalid_name_fails(self):
+        """Exception raised when updating a role with an invalid `name`."""
+        request_to_validate = {'name': True}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_role_validator.validate,
+                          request_to_validate)
+
+        request_to_validate = {'name': 24}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_role_validator.validate,
+                          request_to_validate)
+
+
+class PolicyValidationTestCase(testtools.TestCase):
+    """Test for V3 Policy API validation."""
+
+    def setUp(self):
+        super(PolicyValidationTestCase, self).setUp()
+
+        create = policy_schema.policy_create
+        update = policy_schema.policy_update
+        self.create_policy_validator = validators.SchemaValidator(create)
+        self.update_policy_validator = validators.SchemaValidator(update)
+
+    def test_validate_policy_succeeds(self):
+        """Test that we validate a create policy request."""
+        request_to_validate = {'blob': 'some blob information',
+                               'type': 'application/json'}
+        self.create_policy_validator.validate(request_to_validate)
+
+    def test_validate_policy_without_blob_fails(self):
+        """Exception raised without `blob` in request."""
+        request_to_validate = {'type': 'application/json'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_policy_validator.validate,
+                          request_to_validate)
+
+    def test_validate_policy_without_type_fails(self):
+        """Exception raised without `type` in request."""
+        request_to_validate = {'blob': 'some blob information'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_policy_validator.validate,
+                          request_to_validate)
+
+    def test_validate_policy_create_with_extra_parameters_succeeds(self):
+        """Validate policy create with extra parameters."""
+        request_to_validate = {'blob': 'some blob information',
+                               'type': 'application/json',
+                               'extra': 'some extra stuff'}
+        self.create_policy_validator.validate(request_to_validate)
+
+    def test_validate_policy_create_with_invalid_type_fails(self):
+        """Exception raised when `blob` and `type` are boolean."""
+        for prop in ['blob', 'type']:
+            request_to_validate = {prop: False}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_policy_validator.validate,
+                              request_to_validate)
+
+    def test_validate_policy_update_without_parameters_fails(self):
+        """Exception raised when updating policy without parameters."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_policy_validator.validate,
+                          request_to_validate)
+
+    def test_validate_policy_update_with_extra_parameters_succeeds(self):
+        """Validate policy update request with extra parameters."""
+        request_to_validate = {'blob': 'some blob information',
+                               'type': 'application/json',
+                               'extra': 'some extra stuff'}
+        self.update_policy_validator.validate(request_to_validate)
+
+    def test_validate_policy_update_succeeds(self):
+        """Test that we validate a policy update request."""
+        request_to_validate = {'blob': 'some blob information',
+                               'type': 'application/json'}
+        self.update_policy_validator.validate(request_to_validate)
+
+    def test_validate_policy_update_with_invalid_type_fails(self):
+        """Exception raised when invalid `type` on policy update."""
+        for prop in ['blob', 'type']:
+            request_to_validate = {prop: False}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.update_policy_validator.validate,
+                              request_to_validate)
+
+
+class CredentialValidationTestCase(testtools.TestCase):
+    """Test for V3 Credential API validation."""
+
+    def setUp(self):
+        super(CredentialValidationTestCase, self).setUp()
+
+        create = credential_schema.credential_create
+        update = credential_schema.credential_update
+        self.create_credential_validator = validators.SchemaValidator(create)
+        self.update_credential_validator = validators.SchemaValidator(update)
+
+    def test_validate_credential_succeeds(self):
+        """Test that we validate a credential request."""
+        request_to_validate = {'blob': 'some string',
+                               'project_id': uuid.uuid4().hex,
+                               'type': 'ec2',
+                               'user_id': uuid.uuid4().hex}
+        self.create_credential_validator.validate(request_to_validate)
+
+    def test_validate_credential_without_blob_fails(self):
+        """Exception raised without `blob` in create request."""
+        request_to_validate = {'type': 'ec2',
+                               'user_id': uuid.uuid4().hex}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_credential_validator.validate,
+                          request_to_validate)
+
+    def test_validate_credential_without_user_id_fails(self):
+        """Exception raised without `user_id` in create request."""
+        request_to_validate = {'blob': 'some credential blob',
+                               'type': 'ec2'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_credential_validator.validate,
+                          request_to_validate)
+
+    def test_validate_credential_without_type_fails(self):
+        """Exception raised without `type` in create request."""
+        request_to_validate = {'blob': 'some credential blob',
+                               'user_id': uuid.uuid4().hex}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_credential_validator.validate,
+                          request_to_validate)
+
+    def test_validate_credential_ec2_without_project_id_fails(self):
+        """Validate `project_id` is required for ec2.
+
+        Test that a SchemaValidationError is raised when type is ec2
+        and no `project_id` is provided in create request.
+        """
+        request_to_validate = {'blob': 'some credential blob',
+                               'type': 'ec2',
+                               'user_id': uuid.uuid4().hex}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_credential_validator.validate,
+                          request_to_validate)
+
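+    # The ec2-only `project_id` requirement exercised above and below
+    # suggests a conditional in the schema, e.g. a oneOf branch that adds
+    # 'project_id' to 'required' when `type` is 'ec2' (a sketch of the
+    # mechanism, not the literal credential_schema).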
+    def test_validate_credential_with_project_id_succeeds(self):
+        """Test that credential request works for all types."""
+        cred_types = ['ec2', 'cert', uuid.uuid4().hex]
+
+        for c_type in cred_types:
+            request_to_validate = {'blob': 'some blob',
+                                   'project_id': uuid.uuid4().hex,
+                                   'type': c_type,
+                                   'user_id': uuid.uuid4().hex}
+            # Make sure an exception isn't raised
+            self.create_credential_validator.validate(request_to_validate)
+
+    def test_validate_credential_non_ec2_without_project_id_succeeds(self):
+        """Validate `project_id` is not required for non-ec2.
+
+        Test that create request without `project_id` succeeds for any
+        non-ec2 credential.
+        """
+        cred_types = ['cert', uuid.uuid4().hex]
+
+        for c_type in cred_types:
+            request_to_validate = {'blob': 'some blob',
+                                   'type': c_type,
+                                   'user_id': uuid.uuid4().hex}
+            # Make sure an exception isn't raised
+            self.create_credential_validator.validate(request_to_validate)
+
+    def test_validate_credential_with_extra_parameters_succeeds(self):
+        """Validate create request with extra parameters."""
+        request_to_validate = {'blob': 'some string',
+                               'extra': False,
+                               'project_id': uuid.uuid4().hex,
+                               'type': 'ec2',
+                               'user_id': uuid.uuid4().hex}
+        self.create_credential_validator.validate(request_to_validate)
+
+    def test_validate_credential_update_succeeds(self):
+        """Test that a credential request is properly validated."""
+        request_to_validate = {'blob': 'some string',
+                               'project_id': uuid.uuid4().hex,
+                               'type': 'ec2',
+                               'user_id': uuid.uuid4().hex}
+        self.update_credential_validator.validate(request_to_validate)
+
+    def test_validate_credential_update_without_parameters_fails(self):
+        """Exception is raised on update without parameters."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_credential_validator.validate,
+                          request_to_validate)
+
+    def test_validate_credential_update_with_extra_parameters_succeeds(self):
+        """Validate credential update with extra parameters."""
+        request_to_validate = {'blob': 'some string',
+                               'extra': False,
+                               'project_id': uuid.uuid4().hex,
+                               'type': 'ec2',
+                               'user_id': uuid.uuid4().hex}
+        self.update_credential_validator.validate(request_to_validate)
+
+
+class RegionValidationTestCase(testtools.TestCase):
+    """Test for V3 Region API validation."""
+
+    def setUp(self):
+        super(RegionValidationTestCase, self).setUp()
+
+        self.region_name = 'My Region'
+
+        create = catalog_schema.region_create
+        update = catalog_schema.region_update
+        self.create_region_validator = validators.SchemaValidator(create)
+        self.update_region_validator = validators.SchemaValidator(update)
+
+    def test_validate_region_request(self):
+        """Test that we validate a basic region request."""
+        # A create region request doesn't require any parameters, so make
+        # sure we cover that case.
+        request_to_validate = {}
+        self.create_region_validator.validate(request_to_validate)
+
+    def test_validate_region_create_request_with_parameters(self):
+        """Test that we validate a region request with parameters."""
+        request_to_validate = {'id': 'us-east',
+                               'description': 'US East Region',
+                               'parent_region_id': 'US Region'}
+        self.create_region_validator.validate(request_to_validate)
+
+    def test_validate_region_create_with_uuid(self):
+        """Test that we validate a region request with a UUID as the id."""
+        request_to_validate = {'id': uuid.uuid4().hex,
+                               'description': 'US East Region',
+                               'parent_region_id': uuid.uuid4().hex}
+        self.create_region_validator.validate(request_to_validate)
+
+    def test_validate_region_create_succeeds_with_extra_parameters(self):
+        """Validate create region request with extra values."""
+        request_to_validate = {'other_attr': uuid.uuid4().hex}
+        self.create_region_validator.validate(request_to_validate)
+
+    def test_validate_region_update_succeeds(self):
+        """Test that we validate a region update request."""
+        request_to_validate = {'id': 'us-west',
+                               'description': 'US West Region',
+                               'parent_region_id': 'us-region'}
+        self.update_region_validator.validate(request_to_validate)
+
+    def test_validate_region_update_succeeds_with_extra_parameters(self):
+        """Validate extra attributes in the region update request."""
+        request_to_validate = {'other_attr': uuid.uuid4().hex}
+        self.update_region_validator.validate(request_to_validate)
+
+    def test_validate_region_update_fails_with_no_parameters(self):
+        """Exception raised when passing no parameters in a region update."""
+        # An update request should consist of at least one value to update
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_region_validator.validate,
+                          request_to_validate)
+
+
+class ServiceValidationTestCase(testtools.TestCase):
+    """Test for V3 Service API validation."""
+
+    def setUp(self):
+        super(ServiceValidationTestCase, self).setUp()
+
+        create = catalog_schema.service_create
+        update = catalog_schema.service_update
+        self.create_service_validator = validators.SchemaValidator(create)
+        self.update_service_validator = validators.SchemaValidator(update)
+
+    def test_validate_service_create_succeeds(self):
+        """Test that we validate a service create request."""
+        request_to_validate = {'name': 'Nova',
+                               'description': 'OpenStack Compute Service',
+                               'enabled': True,
+                               'type': 'compute'}
+        self.create_service_validator.validate(request_to_validate)
+
+    def test_validate_service_create_succeeds_with_required_parameters(self):
+        """Validate a service create request with the required parameters."""
+        # The only parameter required for service creation is 'type'.
+        request_to_validate = {'type': 'compute'}
+        self.create_service_validator.validate(request_to_validate)
+
+    def test_validate_service_create_fails_without_type(self):
+        """Exception raised when trying to create a service without `type`."""
+        request_to_validate = {'name': 'Nova'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_service_validator.validate,
+                          request_to_validate)
+
+    def test_validate_service_create_succeeds_with_extra_parameters(self):
+        """Test that extra parameters pass validation on create service."""
+        request_to_validate = {'other_attr': uuid.uuid4().hex,
+                               'type': uuid.uuid4().hex}
+        self.create_service_validator.validate(request_to_validate)
+
+    def test_validate_service_create_succeeds_with_valid_enabled(self):
+        """Validate boolean values as enabled values on service create."""
+        for valid_enabled in _VALID_ENABLED_FORMATS:
+            request_to_validate = {'enabled': valid_enabled,
+                                   'type': uuid.uuid4().hex}
+            self.create_service_validator.validate(request_to_validate)
+
+    def test_validate_service_create_fails_with_invalid_enabled(self):
+        """Exception raised when boolean-like parameters as `enabled`
+
+        On service create, make sure an exception is raised if `enabled` is
+        not a boolean value.
+        """
+        for invalid_enabled in _INVALID_ENABLED_FORMATS:
+            request_to_validate = {'enabled': invalid_enabled,
+                                   'type': uuid.uuid4().hex}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_service_validator.validate,
+                              request_to_validate)
+
+    def test_validate_service_create_fails_when_name_too_long(self):
+        """Exception raised when `name` is greater than 255 characters."""
+        long_name = 'a' * 256
+        request_to_validate = {'type': 'compute',
+                               'name': long_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_service_validator.validate,
+                          request_to_validate)
+
+    def test_validate_service_create_fails_when_name_too_short(self):
+        """Exception is raised when `name` is too short."""
+        request_to_validate = {'type': 'compute',
+                               'name': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_service_validator.validate,
+                          request_to_validate)
+
+    def test_validate_service_create_fails_when_type_too_long(self):
+        """Exception is raised when `type` is too long."""
+        long_type_name = 'a' * 256
+        request_to_validate = {'type': long_type_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_service_validator.validate,
+                          request_to_validate)
+
+    def test_validate_service_create_fails_when_type_too_short(self):
+        """Exception is raised when `type` is too short."""
+        request_to_validate = {'type': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_service_validator.validate,
+                          request_to_validate)
+
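+    # The length checks above suggest bounds along these lines in the create
+    # schema (a sketch, not the literal catalog_schema definition):
+    #
+    #     'name': {'type': 'string', 'minLength': 1, 'maxLength': 255},
+    #     'type': {'type': 'string', 'minLength': 1, 'maxLength': 255},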
+    def test_validate_service_update_request_succeeds(self):
+        """Test that we validate a service update request."""
+        request_to_validate = {'name': 'Cinder',
+                               'type': 'volume',
+                               'description': 'OpenStack Block Storage',
+                               'enabled': False}
+        self.update_service_validator.validate(request_to_validate)
+
+    def test_validate_service_update_fails_with_no_parameters(self):
+        """Exception raised when updating a service without values."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_service_validator.validate,
+                          request_to_validate)
+
+    def test_validate_service_update_succeeds_with_extra_parameters(self):
+        """Validate updating a service with extra parameters."""
+        request_to_validate = {'other_attr': uuid.uuid4().hex}
+        self.update_service_validator.validate(request_to_validate)
+
+    def test_validate_service_update_succeeds_with_valid_enabled(self):
+        """Validate boolean formats as `enabled` on service update."""
+        for valid_enabled in _VALID_ENABLED_FORMATS:
+            request_to_validate = {'enabled': valid_enabled}
+            self.update_service_validator.validate(request_to_validate)
+
+    def test_validate_service_update_fails_with_invalid_enabled(self):
+        """Exception raised when boolean-like values as `enabled`."""
+        for invalid_enabled in _INVALID_ENABLED_FORMATS:
+            request_to_validate = {'enabled': invalid_enabled}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.update_service_validator.validate,
+                              request_to_validate)
+
+    def test_validate_service_update_fails_with_name_too_long(self):
+        """Exception is raised when `name` is too long on update."""
+        long_name = 'a' * 256
+        request_to_validate = {'name': long_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_service_validator.validate,
+                          request_to_validate)
+
+    def test_validate_service_update_fails_with_name_too_short(self):
+        """Exception is raised when `name` is too short on update."""
+        request_to_validate = {'name': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_service_validator.validate,
+                          request_to_validate)
+
+    def test_validate_service_update_fails_with_type_too_long(self):
+        """Exception is raised when `type` is too long on update."""
+        long_type_name = 'a' * 256
+        request_to_validate = {'type': long_type_name}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_service_validator.validate,
+                          request_to_validate)
+
+    def test_validate_service_update_fails_with_type_too_short(self):
+        """Exception is raised when `type` is too short on update."""
+        request_to_validate = {'type': ''}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_service_validator.validate,
+                          request_to_validate)
+
+
+class EndpointValidationTestCase(testtools.TestCase):
+    """Test for V3 Endpoint API validation."""
+
+    def setUp(self):
+        super(EndpointValidationTestCase, self).setUp()
+
+        create = catalog_schema.endpoint_create
+        update = catalog_schema.endpoint_update
+        self.create_endpoint_validator = validators.SchemaValidator(create)
+        self.update_endpoint_validator = validators.SchemaValidator(update)
+
+    def test_validate_endpoint_request_succeeds(self):
+        """Test that we validate an endpoint request."""
+        request_to_validate = {'enabled': True,
+                               'interface': 'admin',
+                               'region_id': uuid.uuid4().hex,
+                               'service_id': uuid.uuid4().hex,
+                               'url': 'https://service.example.com:5000/'}
+        self.create_endpoint_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_create_succeeds_with_required_parameters(self):
+        """Validate an endpoint request with only the required parameters."""
+        # According to the Identity V3 API, endpoint creation requires
+        # 'service_id', 'interface', and 'url'.
+        request_to_validate = {'service_id': uuid.uuid4().hex,
+                               'interface': 'public',
+                               'url': 'https://service.example.com:5000/'}
+        self.create_endpoint_validator.validate(request_to_validate)
+
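+    # In JSON Schema terms this presumably amounts to a required list such
+    # as 'required': ['service_id', 'interface', 'url'] (an assumption
+    # drawn from the failure tests below, not the literal schema).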
+    def test_validate_endpoint_create_succeeds_with_valid_enabled(self):
+        """Validate an endpoint with boolean values.
+
+        Validate boolean values as `enabled` in endpoint create requests.
+        """
+        for valid_enabled in _VALID_ENABLED_FORMATS:
+            request_to_validate = {'enabled': valid_enabled,
+                                   'service_id': uuid.uuid4().hex,
+                                   'interface': 'public',
+                                   'url': 'https://service.example.com:5000/'}
+            self.create_endpoint_validator.validate(request_to_validate)
+
+    def test_validate_create_endpoint_fails_with_invalid_enabled(self):
+        """Exception raised when boolean-like values as `enabled`."""
+        for invalid_enabled in _INVALID_ENABLED_FORMATS:
+            request_to_validate = {'enabled': invalid_enabled,
+                                   'service_id': uuid.uuid4().hex,
+                                   'interface': 'public',
+                                   'url': 'https://service.example.com:5000/'}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_endpoint_validator.validate,
+                              request_to_validate)
+
+    def test_validate_endpoint_create_succeeds_with_extra_parameters(self):
+        """Test that extra parameters pass validation on create endpoint."""
+        request_to_validate = {'other_attr': uuid.uuid4().hex,
+                               'service_id': uuid.uuid4().hex,
+                               'interface': 'public',
+                               'url': 'https://service.example.com:5000/'}
+        self.create_endpoint_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_create_fails_without_service_id(self):
+        """Exception raised when `service_id` isn't in endpoint request."""
+        request_to_validate = {'interface': 'public',
+                               'url': 'https://service.example.com:5000/'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_endpoint_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_create_fails_without_interface(self):
+        """Exception raised when `interface` isn't in endpoint request."""
+        request_to_validate = {'service_id': uuid.uuid4().hex,
+                               'url': 'https://service.example.com:5000/'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_endpoint_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_create_fails_without_url(self):
+        """Exception raised when `url` isn't in endpoint request."""
+        request_to_validate = {'service_id': uuid.uuid4().hex,
+                               'interface': 'public'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_endpoint_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_create_succeeds_with_url(self):
+        """Validate `url` attribute in endpoint create request."""
+        request_to_validate = {'service_id': uuid.uuid4().hex,
+                               'interface': 'public'}
+        for url in _VALID_URLS:
+            request_to_validate['url'] = url
+            self.create_endpoint_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_create_fails_with_invalid_url(self):
+        """Exception raised when passing invalid `url` in request."""
+        request_to_validate = {'service_id': uuid.uuid4().hex,
+                               'interface': 'public'}
+        for url in _INVALID_URLS:
+            request_to_validate['url'] = url
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_endpoint_validator.validate,
+                              request_to_validate)
+
+    def test_validate_endpoint_create_fails_with_invalid_interface(self):
+        """Exception raised with invalid `interface`."""
+        request_to_validate = {'interface': uuid.uuid4().hex,
+                               'service_id': uuid.uuid4().hex,
+                               'url': 'https://service.example.com:5000/'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_endpoint_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_update_fails_with_invalid_enabled(self):
+        """Exception raised when `enabled` is boolean-like value."""
+        for invalid_enabled in _INVALID_ENABLED_FORMATS:
+            request_to_validate = {'enabled': invalid_enabled}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.update_endpoint_validator.validate,
+                              request_to_validate)
+
+    def test_validate_endpoint_update_succeeds_with_valid_enabled(self):
+        """Validate `enabled` as boolean values."""
+        for valid_enabled in _VALID_ENABLED_FORMATS:
+            request_to_validate = {'enabled': valid_enabled}
+            self.update_endpoint_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_update_fails_with_invalid_interface(self):
+        """Exception raised when invalid `interface` on endpoint update."""
+        request_to_validate = {'interface': uuid.uuid4().hex,
+                               'service_id': uuid.uuid4().hex,
+                               'url': 'https://service.example.com:5000/'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_endpoint_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_update_request_succeeds(self):
+        """Test that we validate an endpoint update request."""
+        request_to_validate = {'enabled': True,
+                               'interface': 'admin',
+                               'region_id': uuid.uuid4().hex,
+                               'service_id': uuid.uuid4().hex,
+                               'url': 'https://service.example.com:5000/'}
+        self.update_endpoint_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_update_fails_with_no_parameters(self):
+        """Exception raised when no parameters on endpoint update."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_endpoint_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_update_succeeds_with_extra_parameters(self):
+        """Test that extra parameters pass validation on update endpoint."""
+        request_to_validate = {'enabled': True,
+                               'interface': 'admin',
+                               'region_id': uuid.uuid4().hex,
+                               'service_id': uuid.uuid4().hex,
+                               'url': 'https://service.example.com:5000/',
+                               'other_attr': uuid.uuid4().hex}
+        self.update_endpoint_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_update_succeeds_with_url(self):
+        """Validate `url` attribute in endpoint update request."""
+        request_to_validate = {'service_id': uuid.uuid4().hex,
+                               'interface': 'public'}
+        for url in _VALID_URLS:
+            request_to_validate['url'] = url
+            self.update_endpoint_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_update_fails_with_invalid_url(self):
+        """Exception raised when passing invalid `url` in request."""
+        request_to_validate = {'service_id': uuid.uuid4().hex,
+                               'interface': 'public'}
+        for url in _INVALID_URLS:
+            request_to_validate['url'] = url
+            self.assertRaises(exception.SchemaValidationError,
+                              self.update_endpoint_validator.validate,
+                              request_to_validate)
+
+
+class EndpointGroupValidationTestCase(testtools.TestCase):
+    """Test for V3 Endpoint Group API validation."""
+
+    def setUp(self):
+        super(EndpointGroupValidationTestCase, self).setUp()
+
+        create = endpoint_filter_schema.endpoint_group_create
+        update = endpoint_filter_schema.endpoint_group_update
+        self.create_endpoint_grp_validator = validators.SchemaValidator(create)
+        self.update_endpoint_grp_validator = validators.SchemaValidator(update)
+
+    def test_validate_endpoint_group_request_succeeds(self):
+        """Test that we validate an endpoint group request."""
+        request_to_validate = {'description': 'endpoint group description',
+                               'filters': {'interface': 'admin'},
+                               'name': 'endpoint_group_name'}
+        self.create_endpoint_grp_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_group_create_succeeds_with_req_parameters(self):
+        """Validate required endpoint group parameters.
+
+        This test ensures that validation succeeds when only the required
+        parameters are passed for creating an endpoint group.
+        """
+        request_to_validate = {'filters': {'interface': 'admin'},
+                               'name': 'endpoint_group_name'}
+        self.create_endpoint_grp_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_group_create_succeeds_with_valid_filters(self):
+        """Validate dict values as `filters` in endpoint group create requests.
+        """
+        request_to_validate = {'description': 'endpoint group description',
+                               'name': 'endpoint_group_name'}
+        for valid_filters in _VALID_FILTERS:
+            request_to_validate['filters'] = valid_filters
+            self.create_endpoint_grp_validator.validate(request_to_validate)
+
+    def test_validate_create_endpoint_group_fails_with_invalid_filters(self):
+        """Validate invalid `filters` value in endpoint group parameters.
+
+        This test ensures that an exception is raised when a non-dict value
+        is used as `filters` in an endpoint group create request.
+        """
+        request_to_validate = {'description': 'endpoint group description',
+                               'name': 'endpoint_group_name'}
+        for invalid_filters in _INVALID_FILTERS:
+            request_to_validate['filters'] = invalid_filters
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_endpoint_grp_validator.validate,
+                              request_to_validate)
+
+    def test_validate_endpoint_group_create_fails_without_name(self):
+        """Exception raised when `name` isn't in endpoint group request."""
+        request_to_validate = {'description': 'endpoint group description',
+                               'filters': {'interface': 'admin'}}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_endpoint_grp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_group_create_fails_without_filters(self):
+        """Exception raised when `filters` isn't in endpoint group request."""
+        request_to_validate = {'description': 'endpoint group description',
+                               'name': 'endpoint_group_name'}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_endpoint_grp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_group_update_request_succeeds(self):
+        """Test that we validate an endpoint group update request."""
+        request_to_validate = {'description': 'endpoint group description',
+                               'filters': {'interface': 'admin'},
+                               'name': 'endpoint_group_name'}
+        self.update_endpoint_grp_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_group_update_fails_with_no_parameters(self):
+        """Exception raised when no parameters on endpoint group update."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_endpoint_grp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_endpoint_group_update_succeeds_with_name(self):
+        """Validate request with  only `name` in endpoint group update.
+
+        This test ensures that passing only a `name` passes validation
+        on update endpoint group request.
+        """
+        request_to_validate = {'name': 'endpoint_group_name'}
+        self.update_endpoint_grp_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_group_update_succeeds_with_valid_filters(self):
+        """Validate `filters` as dict values."""
+        for valid_filters in _VALID_FILTERS:
+            request_to_validate = {'filters': valid_filters}
+            self.update_endpoint_grp_validator.validate(request_to_validate)
+
+    def test_validate_endpoint_group_update_fails_with_invalid_filters(self):
+        """Exception raised when passing invalid `filters` in request."""
+        for invalid_filters in _INVALID_FILTERS:
+            request_to_validate = {'filters': invalid_filters}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.update_endpoint_grp_validator.validate,
+                              request_to_validate)
+
+
+class TrustValidationTestCase(testtools.TestCase):
+    """Test for V3 Trust API validation."""
+
+    _valid_roles = ['member', uuid.uuid4().hex, str(uuid.uuid4())]
+    _invalid_roles = [False, True, 123, None]
+
+    def setUp(self):
+        super(TrustValidationTestCase, self).setUp()
+
+        create = trust_schema.trust_create
+        self.create_trust_validator = validators.SchemaValidator(create)
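+        # Keystone trusts are immutable once created, which is presumably
+        # why only a create schema is validated here (no trust_update).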
+
+    def test_validate_trust_succeeds(self):
+        """Test that we can validate a trust request."""
+        request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                               'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': False}
+        self.create_trust_validator.validate(request_to_validate)
+
+    def test_validate_trust_with_all_parameters_succeeds(self):
+        """Test that we can validate a trust request with all parameters."""
+        request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                               'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': False,
+                               'project_id': uuid.uuid4().hex,
+                               'roles': [uuid.uuid4().hex, uuid.uuid4().hex],
+                               'expires_at': 'some timestamp',
+                               'remaining_uses': 2}
+        self.create_trust_validator.validate(request_to_validate)
+
+    def test_validate_trust_without_trustor_id_fails(self):
+        """Validate trust request fails without `trustor_id`."""
+        request_to_validate = {'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': False}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_trust_validator.validate,
+                          request_to_validate)
+
+    def test_validate_trust_without_trustee_id_fails(self):
+        """Validate trust request fails without `trustee_id`."""
+        request_to_validate = {'trusor_user_id': uuid.uuid4().hex,
+                               'impersonation': False}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_trust_validator.validate,
+                          request_to_validate)
+
+    def test_validate_trust_without_impersonation_fails(self):
+        """Validate trust request fails without `impersonation`."""
+        request_to_validate = {'trustee_user_id': uuid.uuid4().hex,
+                               'trustor_user_id': uuid.uuid4().hex}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_trust_validator.validate,
+                          request_to_validate)
+
+    def test_validate_trust_with_extra_parameters_succeeds(self):
+        """Test that we can validate a trust request with extra parameters."""
+        request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                               'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': False,
+                               'project_id': uuid.uuid4().hex,
+                               'roles': [uuid.uuid4().hex, uuid.uuid4().hex],
+                               'expires_at': 'some timestamp',
+                               'remaining_uses': 2,
+                               'extra': 'something extra!'}
+        self.create_trust_validator.validate(request_to_validate)
+
+    def test_validate_trust_with_invalid_impersonation_fails(self):
+        """Validate trust request with invalid `impersonation` fails."""
+        request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                               'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': 2}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_trust_validator.validate,
+                          request_to_validate)
+
+    def test_validate_trust_with_null_remaining_uses_succeeds(self):
+        """Validate trust request with null `remaining_uses`."""
+        request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                               'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': False,
+                               'remaining_uses': None}
+        self.create_trust_validator.validate(request_to_validate)
+
+    def test_validate_trust_with_remaining_uses_succeeds(self):
+        """Validate trust request with `remaining_uses` succeeds."""
+        request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                               'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': False,
+                               'remaining_uses': 2}
+        self.create_trust_validator.validate(request_to_validate)
+
+    def test_validate_trust_with_invalid_expires_at_fails(self):
+        """Validate trust request with invalid `expires_at` fails."""
+        request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                               'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': False,
+                               'expires_at': 3}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_trust_validator.validate,
+                          request_to_validate)
+
+    def test_validate_trust_with_role_types_succeeds(self):
+        """Validate trust request with `roles` succeeds."""
+        for role in self._valid_roles:
+            request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                                   'trustee_user_id': uuid.uuid4().hex,
+                                   'impersonation': False,
+                                   'roles': [role]}
+            self.create_trust_validator.validate(request_to_validate)
+
+    def test_validate_trust_with_invalid_role_type_fails(self):
+        """Validate trust request with invalid `roles` fails."""
+        for role in self._invalid_roles:
+            request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                                   'trustee_user_id': uuid.uuid4().hex,
+                                   'impersonation': False,
+                                   'roles': role}
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_trust_validator.validate,
+                              request_to_validate)
+
+    def test_validate_trust_with_list_of_valid_roles_succeeds(self):
+        """Validate trust request with a list of valid `roles`."""
+        request_to_validate = {'trustor_user_id': uuid.uuid4().hex,
+                               'trustee_user_id': uuid.uuid4().hex,
+                               'impersonation': False,
+                               'roles': self._valid_roles}
+        self.create_trust_validator.validate(request_to_validate)
+
+
+class ServiceProviderValidationTestCase(testtools.TestCase):
+    """Test for V3 Service Provider API validation."""
+
+    def setUp(self):
+        super(ServiceProviderValidationTestCase, self).setUp()
+
+        self.valid_auth_url = 'https://' + uuid.uuid4().hex + '.com'
+        self.valid_sp_url = 'https://' + uuid.uuid4().hex + '.com'
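+        # A bare hex string is rejected as `auth_url`/`sp_url` in the tests
+        # below, so the schema evidently requires well-formed URLs.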
+
+        create = federation_schema.service_provider_create
+        update = federation_schema.service_provider_update
+        self.create_sp_validator = validators.SchemaValidator(create)
+        self.update_sp_validator = validators.SchemaValidator(update)
+
+    def test_validate_sp_request(self):
+        """Test that we validate `auth_url` and `sp_url` in request."""
+        request_to_validate = {
+            'auth_url': self.valid_auth_url,
+            'sp_url': self.valid_sp_url
+        }
+        self.create_sp_validator.validate(request_to_validate)
+
+    def test_validate_sp_request_with_invalid_auth_url_fails(self):
+        """Validate request fails with invalid `auth_url`."""
+        request_to_validate = {
+            'auth_url': uuid.uuid4().hex,
+            'sp_url': self.valid_sp_url
+        }
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_sp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_sp_request_with_invalid_sp_url_fails(self):
+        """Validate request fails with invalid `sp_url`."""
+        request_to_validate = {
+            'auth_url': self.valid_auth_url,
+            'sp_url': uuid.uuid4().hex,
+        }
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_sp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_sp_request_without_auth_url_fails(self):
+        """Validate request fails without `auth_url`."""
+        request_to_validate = {
+            'sp_url': self.valid_sp_url
+        }
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_sp_validator.validate,
+                          request_to_validate)
+        request_to_validate = {
+            'auth_url': None,
+            'sp_url': self.valid_sp_url
+        }
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_sp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_sp_request_without_sp_url_fails(self):
+        """Validate request fails without `sp_url`."""
+        request_to_validate = {
+            'auth_url': self.valid_auth_url,
+        }
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_sp_validator.validate,
+                          request_to_validate)
+        request_to_validate = {
+            'auth_url': self.valid_auth_url,
+            'sp_url': None,
+        }
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_sp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_sp_request_with_enabled(self):
+        """Validate `enabled` as boolean-like values."""
+        for valid_enabled in _VALID_ENABLED_FORMATS:
+            request_to_validate = {
+                'auth_url': self.valid_auth_url,
+                'sp_url': self.valid_sp_url,
+                'enabled': valid_enabled
+            }
+            self.create_sp_validator.validate(request_to_validate)
+
+    def test_validate_sp_request_with_invalid_enabled_fails(self):
+        """Exception is raised when `enabled` isn't a boolean-like value."""
+        for invalid_enabled in _INVALID_ENABLED_FORMATS:
+            request_to_validate = {
+                'auth_url': self.valid_auth_url,
+                'sp_url': self.valid_sp_url,
+                'enabled': invalid_enabled
+            }
+            self.assertRaises(exception.SchemaValidationError,
+                              self.create_sp_validator.validate,
+                              request_to_validate)
+
+    def test_validate_sp_request_with_valid_description(self):
+        """Test that we validate `description` in create requests."""
+        request_to_validate = {
+            'auth_url': self.valid_auth_url,
+            'sp_url': self.valid_sp_url,
+            'description': 'My Service Provider'
+        }
+        self.create_sp_validator.validate(request_to_validate)
+
+    def test_validate_sp_request_with_invalid_description_fails(self):
+        """Exception is raised when `description` as a non-string value."""
+        request_to_validate = {
+            'auth_url': self.valid_auth_url,
+            'sp_url': self.valid_sp_url,
+            'description': False
+        }
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_sp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_sp_request_with_extra_field_fails(self):
+        """Exception raised when passing extra fields in the body."""
+        # 'id' can't be passed in the body since it is passed in the URL
+        request_to_validate = {
+            'id': 'ACME',
+            'auth_url': self.valid_auth_url,
+            'sp_url': self.valid_sp_url,
+            'description': 'My Service Provider'
+        }
+        self.assertRaises(exception.SchemaValidationError,
+                          self.create_sp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_sp_update_request(self):
+        """Test that we validate a update request."""
+        request_to_validate = {'description': uuid.uuid4().hex}
+        self.update_sp_validator.validate(request_to_validate)
+
+    def test_validate_sp_update_request_with_no_parameters_fails(self):
+        """Exception is raised when updating without parameters."""
+        request_to_validate = {}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_sp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_sp_update_request_with_invalid_auth_url_fails(self):
+        """Exception raised when updating with invalid `auth_url`."""
+        request_to_validate = {'auth_url': uuid.uuid4().hex}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_sp_validator.validate,
+                          request_to_validate)
+        request_to_validate = {'auth_url': None}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_sp_validator.validate,
+                          request_to_validate)
+
+    def test_validate_sp_update_request_with_invalid_sp_url_fails(self):
+        """Exception raised when updating with invalid `sp_url`."""
+        request_to_validate = {'sp_url': uuid.uuid4().hex}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_sp_validator.validate,
+                          request_to_validate)
+        request_to_validate = {'sp_url': None}
+        self.assertRaises(exception.SchemaValidationError,
+                          self.update_sp_validator.validate,
+                          request_to_validate)
diff --git a/keystone-moon/keystone/tests/unit/test_versions.py b/keystone-moon/keystone/tests/unit/test_versions.py
new file mode 100644 (file)
index 0000000..6fe692a
--- /dev/null
@@ -0,0 +1,1051 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import functools
+import random
+
+import mock
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+from testtools import matchers as tt_matchers
+
+from keystone.common import json_home
+from keystone import controllers
+from keystone.tests import unit as tests
+
+
+CONF = cfg.CONF
+
+v2_MEDIA_TYPES = [
+    {
+        "base": "application/json",
+        "type": "application/"
+                "vnd.openstack.identity-v2.0+json"
+    }
+]
+
+v2_HTML_DESCRIPTION = {
+    "rel": "describedby",
+    "type": "text/html",
+    "href": "http://docs.openstack.org/"
+}
+
+
+v2_EXPECTED_RESPONSE = {
+    "id": "v2.0",
+    "status": "stable",
+    "updated": "2014-04-17T00:00:00Z",
+    "links": [
+        {
+            "rel": "self",
+            "href": "",     # Will get filled in after initialization
+        },
+        v2_HTML_DESCRIPTION
+    ],
+    "media-types": v2_MEDIA_TYPES
+}
+
+v2_VERSION_RESPONSE = {
+    "version": v2_EXPECTED_RESPONSE
+}
+
+v3_MEDIA_TYPES = [
+    {
+        "base": "application/json",
+        "type": "application/"
+                "vnd.openstack.identity-v3+json"
+    }
+]
+
+v3_EXPECTED_RESPONSE = {
+    "id": "v3.0",
+    "status": "stable",
+    "updated": "2013-03-06T00:00:00Z",
+    "links": [
+        {
+            "rel": "self",
+            "href": "",     # Will get filled in after initialization
+        }
+    ],
+    "media-types": v3_MEDIA_TYPES
+}
+
+v3_VERSION_RESPONSE = {
+    "version": v3_EXPECTED_RESPONSE
+}
+
+VERSIONS_RESPONSE = {
+    "versions": {
+        "values": [
+            v3_EXPECTED_RESPONSE,
+            v2_EXPECTED_RESPONSE
+        ]
+    }
+}
+
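+# The empty 'href' placeholders above get filled in once the test knows the
+# server it is talking to; a sketch of the fill-in step (the exact URL is an
+# assumption, not this module's literal code):
+#
+#     expected = copy.deepcopy(v3_EXPECTED_RESPONSE)
+#     expected['links'][0]['href'] = 'http://localhost:5000/v3/'
+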
+_build_ec2tokens_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation, extension_name='OS-EC2',
+    extension_version='1.0')
+
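+# The _build_* partials pin an extension name and version, leaving only the
+# resource name to vary. Judging from the literal OS-REVOKE relation used
+# further down, the generated strings take the form:
+#
+#     http://docs.openstack.org/api/openstack-identity/3/
+#         ext/{extension_name}/{extension_version}/rel/{resource_name}
+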
+REVOCATIONS_RELATION = json_home.build_v3_extension_resource_relation(
+    'OS-PKI', '1.0', 'revocations')
+
+_build_simple_cert_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-SIMPLE-CERT', extension_version='1.0')
+
+_build_trust_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation, extension_name='OS-TRUST',
+    extension_version='1.0')
+
+_build_federation_rel = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-FEDERATION',
+    extension_version='1.0')
+
+_build_oauth1_rel = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-OAUTH1', extension_version='1.0')
+
+_build_ep_policy_rel = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-ENDPOINT-POLICY', extension_version='1.0')
+
+_build_ep_filter_rel = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-EP-FILTER', extension_version='1.0')
+
+TRUST_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
+    'OS-TRUST', '1.0', 'trust_id')
+
+IDP_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
+    'OS-FEDERATION', '1.0', 'idp_id')
+
+PROTOCOL_ID_PARAM_RELATION = json_home.build_v3_extension_parameter_relation(
+    'OS-FEDERATION', '1.0', 'protocol_id')
+
+MAPPING_ID_PARAM_RELATION = json_home.build_v3_extension_parameter_relation(
+    'OS-FEDERATION', '1.0', 'mapping_id')
+
+SP_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
+    'OS-FEDERATION', '1.0', 'sp_id')
+
+CONSUMER_ID_PARAMETER_RELATION = (
+    json_home.build_v3_extension_parameter_relation(
+        'OS-OAUTH1', '1.0', 'consumer_id'))
+
+REQUEST_TOKEN_ID_PARAMETER_RELATION = (
+    json_home.build_v3_extension_parameter_relation(
+        'OS-OAUTH1', '1.0', 'request_token_id'))
+
+ACCESS_TOKEN_ID_PARAMETER_RELATION = (
+    json_home.build_v3_extension_parameter_relation(
+        'OS-OAUTH1', '1.0', 'access_token_id'))
+
+ENDPOINT_GROUP_ID_PARAMETER_RELATION = (
+    json_home.build_v3_extension_parameter_relation(
+        'OS-EP-FILTER', '1.0', 'endpoint_group_id'))
+
+BASE_IDP_PROTOCOL = '/OS-FEDERATION/identity_providers/{idp_id}/protocols'
+BASE_EP_POLICY = '/policies/{policy_id}/OS-ENDPOINT-POLICY'
+BASE_EP_FILTER = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'
+BASE_ACCESS_TOKEN = (
+    '/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}')
+
+# TODO(stevemar): Use BASE_IDP_PROTOCOL when bug 1420125 is resolved.
+FEDERATED_AUTH_URL = ('/OS-FEDERATION/identity_providers/{identity_provider}'
+                      '/protocols/{protocol}/auth')
+
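+# Each entry below is either a fixed path ('href') or a URI template plus
+# the relations of its variables ('href-template' and 'href-vars'), which
+# is the shape the JSON Home draft prescribes for resource objects.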
+V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
+    json_home.build_v3_resource_relation('auth_tokens'): {
+        'href': '/auth/tokens'},
+    json_home.build_v3_resource_relation('auth_catalog'): {
+        'href': '/auth/catalog'},
+    json_home.build_v3_resource_relation('auth_projects'): {
+        'href': '/auth/projects'},
+    json_home.build_v3_resource_relation('auth_domains'): {
+        'href': '/auth/domains'},
+    json_home.build_v3_resource_relation('credential'): {
+        'href-template': '/credentials/{credential_id}',
+        'href-vars': {
+            'credential_id':
+            json_home.build_v3_parameter_relation('credential_id')}},
+    json_home.build_v3_resource_relation('credentials'): {
+        'href': '/credentials'},
+    json_home.build_v3_resource_relation('domain'): {
+        'href-template': '/domains/{domain_id}',
+        'href-vars': {'domain_id': json_home.Parameters.DOMAIN_ID, }},
+    json_home.build_v3_resource_relation('domain_group_role'): {
+        'href-template':
+        '/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
+        'href-vars': {
+            'domain_id': json_home.Parameters.DOMAIN_ID,
+            'group_id': json_home.Parameters.GROUP_ID,
+            'role_id': json_home.Parameters.ROLE_ID, }},
+    json_home.build_v3_resource_relation('domain_group_roles'): {
+        'href-template': '/domains/{domain_id}/groups/{group_id}/roles',
+        'href-vars': {
+            'domain_id': json_home.Parameters.DOMAIN_ID,
+            'group_id': json_home.Parameters.GROUP_ID}},
+    json_home.build_v3_resource_relation('domain_user_role'): {
+        'href-template':
+        '/domains/{domain_id}/users/{user_id}/roles/{role_id}',
+        'href-vars': {
+            'domain_id': json_home.Parameters.DOMAIN_ID,
+            'role_id': json_home.Parameters.ROLE_ID,
+            'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('domain_user_roles'): {
+        'href-template': '/domains/{domain_id}/users/{user_id}/roles',
+        'href-vars': {
+            'domain_id': json_home.Parameters.DOMAIN_ID,
+            'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('domains'): {'href': '/domains'},
+    json_home.build_v3_resource_relation('endpoint'): {
+        'href-template': '/endpoints/{endpoint_id}',
+        'href-vars': {
+            'endpoint_id':
+            json_home.build_v3_parameter_relation('endpoint_id'), }},
+    json_home.build_v3_resource_relation('endpoints'): {
+        'href': '/endpoints'},
+    _build_ec2tokens_relation(resource_name='ec2tokens'): {
+        'href': '/ec2tokens'},
+    _build_ec2tokens_relation(resource_name='user_credential'): {
+        'href-template': '/users/{user_id}/credentials/OS-EC2/{credential_id}',
+        'href-vars': {
+            'credential_id': json_home.build_v3_extension_parameter_relation(
+                'OS-EC2', '1.0', 'credential_id'),
+            'user_id': json_home.Parameters.USER_ID, }},
+    _build_ec2tokens_relation(resource_name='user_credentials'): {
+        'href-template': '/users/{user_id}/credentials/OS-EC2',
+        'href-vars': {
+            'user_id': json_home.Parameters.USER_ID, }},
+    REVOCATIONS_RELATION: {
+        'href': '/auth/tokens/OS-PKI/revoked'},
+    'http://docs.openstack.org/api/openstack-identity/3/ext/OS-REVOKE/1.0/rel/'
+    'events': {
+        'href': '/OS-REVOKE/events'},
+    _build_simple_cert_relation(resource_name='ca_certificate'): {
+        'href': '/OS-SIMPLE-CERT/ca'},
+    _build_simple_cert_relation(resource_name='certificates'): {
+        'href': '/OS-SIMPLE-CERT/certificates'},
+    _build_trust_relation(resource_name='trust'):
+    {
+        'href-template': '/OS-TRUST/trusts/{trust_id}',
+        'href-vars': {'trust_id': TRUST_ID_PARAMETER_RELATION, }},
+    _build_trust_relation(resource_name='trust_role'): {
+        'href-template': '/OS-TRUST/trusts/{trust_id}/roles/{role_id}',
+        'href-vars': {
+            'role_id': json_home.Parameters.ROLE_ID,
+            'trust_id': TRUST_ID_PARAMETER_RELATION, }},
+    _build_trust_relation(resource_name='trust_roles'): {
+        'href-template': '/OS-TRUST/trusts/{trust_id}/roles',
+        'href-vars': {'trust_id': TRUST_ID_PARAMETER_RELATION, }},
+    _build_trust_relation(resource_name='trusts'): {
+        'href': '/OS-TRUST/trusts'},
+    'http://docs.openstack.org/api/openstack-identity/3/ext/s3tokens/1.0/rel/'
+    's3tokens': {
+        'href': '/s3tokens'},
+    json_home.build_v3_resource_relation('group'): {
+        'href-template': '/groups/{group_id}',
+        'href-vars': {
+            'group_id': json_home.Parameters.GROUP_ID, }},
+    json_home.build_v3_resource_relation('group_user'): {
+        'href-template': '/groups/{group_id}/users/{user_id}',
+        'href-vars': {
+            'group_id': json_home.Parameters.GROUP_ID,
+            'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('group_users'): {
+        'href-template': '/groups/{group_id}/users',
+        'href-vars': {'group_id': json_home.Parameters.GROUP_ID, }},
+    json_home.build_v3_resource_relation('groups'): {'href': '/groups'},
+    json_home.build_v3_resource_relation('policies'): {
+        'href': '/policies'},
+    json_home.build_v3_resource_relation('policy'): {
+        'href-template': '/policies/{policy_id}',
+        'href-vars': {
+            'policy_id':
+            json_home.build_v3_parameter_relation('policy_id'), }},
+    json_home.build_v3_resource_relation('project'): {
+        'href-template': '/projects/{project_id}',
+        'href-vars': {
+            'project_id': json_home.Parameters.PROJECT_ID, }},
+    json_home.build_v3_resource_relation('project_group_role'): {
+        'href-template':
+        '/projects/{project_id}/groups/{group_id}/roles/{role_id}',
+        'href-vars': {
+            'group_id': json_home.Parameters.GROUP_ID,
+            'project_id': json_home.Parameters.PROJECT_ID,
+            'role_id': json_home.Parameters.ROLE_ID, }},
+    json_home.build_v3_resource_relation('project_group_roles'): {
+        'href-template': '/projects/{project_id}/groups/{group_id}/roles',
+        'href-vars': {
+            'group_id': json_home.Parameters.GROUP_ID,
+            'project_id': json_home.Parameters.PROJECT_ID, }},
+    json_home.build_v3_resource_relation('project_user_role'): {
+        'href-template':
+        '/projects/{project_id}/users/{user_id}/roles/{role_id}',
+        'href-vars': {
+            'project_id': json_home.Parameters.PROJECT_ID,
+            'role_id': json_home.Parameters.ROLE_ID,
+            'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('project_user_roles'): {
+        'href-template': '/projects/{project_id}/users/{user_id}/roles',
+        'href-vars': {
+            'project_id': json_home.Parameters.PROJECT_ID,
+            'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('projects'): {
+        'href': '/projects'},
+    json_home.build_v3_resource_relation('region'): {
+        'href-template': '/regions/{region_id}',
+        'href-vars': {
+            'region_id':
+            json_home.build_v3_parameter_relation('region_id'), }},
+    json_home.build_v3_resource_relation('regions'): {'href': '/regions'},
+    json_home.build_v3_resource_relation('role'): {
+        'href-template': '/roles/{role_id}',
+        'href-vars': {
+            'role_id': json_home.Parameters.ROLE_ID, }},
+    json_home.build_v3_resource_relation('role_assignments'): {
+        'href': '/role_assignments'},
+    json_home.build_v3_resource_relation('roles'): {'href': '/roles'},
+    json_home.build_v3_resource_relation('service'): {
+        'href-template': '/services/{service_id}',
+        'href-vars': {
+            'service_id':
+            json_home.build_v3_parameter_relation('service_id')}},
+    json_home.build_v3_resource_relation('services'): {
+        'href': '/services'},
+    json_home.build_v3_resource_relation('user'): {
+        'href-template': '/users/{user_id}',
+        'href-vars': {
+            'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('user_change_password'): {
+        'href-template': '/users/{user_id}/password',
+        'href-vars': {'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('user_groups'): {
+        'href-template': '/users/{user_id}/groups',
+        'href-vars': {'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('user_projects'): {
+        'href-template': '/users/{user_id}/projects',
+        'href-vars': {'user_id': json_home.Parameters.USER_ID, }},
+    json_home.build_v3_resource_relation('users'): {'href': '/users'},
+    _build_federation_rel(resource_name='domains'): {
+        'href': '/OS-FEDERATION/domains'},
+    _build_federation_rel(resource_name='websso'): {
+        'href-template': '/auth/OS-FEDERATION/websso/{protocol_id}',
+        'href-vars': {
+            'protocol_id': PROTOCOL_ID_PARAM_RELATION, }},
+    _build_federation_rel(resource_name='projects'): {
+        'href': '/OS-FEDERATION/projects'},
+    _build_federation_rel(resource_name='saml2'): {
+        'href': '/auth/OS-FEDERATION/saml2'},
+    _build_federation_rel(resource_name='metadata'): {
+        'href': '/OS-FEDERATION/saml2/metadata'},
+    _build_federation_rel(resource_name='identity_providers'): {
+        'href': '/OS-FEDERATION/identity_providers'},
+    _build_federation_rel(resource_name='service_providers'): {
+        'href': '/OS-FEDERATION/service_providers'},
+    _build_federation_rel(resource_name='mappings'): {
+        'href': '/OS-FEDERATION/mappings'},
+    _build_federation_rel(resource_name='identity_provider'):
+    {
+        'href-template': '/OS-FEDERATION/identity_providers/{idp_id}',
+        'href-vars': {'idp_id': IDP_ID_PARAMETER_RELATION, }},
+    _build_federation_rel(resource_name='service_provider'):
+    {
+        'href-template': '/OS-FEDERATION/service_providers/{sp_id}',
+        'href-vars': {'sp_id': SP_ID_PARAMETER_RELATION, }},
+    _build_federation_rel(resource_name='mapping'):
+    {
+        'href-template': '/OS-FEDERATION/mappings/{mapping_id}',
+        'href-vars': {'mapping_id': MAPPING_ID_PARAM_RELATION, }},
+    _build_federation_rel(resource_name='identity_provider_protocol'): {
+        'href-template': BASE_IDP_PROTOCOL + '/{protocol_id}',
+        'href-vars': {
+            'idp_id': IDP_ID_PARAMETER_RELATION,
+            'protocol_id': PROTOCOL_ID_PARAM_RELATION, }},
+    _build_federation_rel(resource_name='identity_provider_protocols'): {
+        'href-template': BASE_IDP_PROTOCOL,
+        'href-vars': {
+            'idp_id': IDP_ID_PARAMETER_RELATION}},
+    # TODO(stevemar): Update href-vars when bug 1420125 is resolved.
+    _build_federation_rel(resource_name='identity_provider_protocol_auth'): {
+        'href-template': FEDERATED_AUTH_URL,
+        'href-vars': {
+            'identity_provider': IDP_ID_PARAMETER_RELATION,
+            'protocol': PROTOCOL_ID_PARAM_RELATION, }},
+    _build_oauth1_rel(resource_name='access_tokens'): {
+        'href': '/OS-OAUTH1/access_token'},
+    _build_oauth1_rel(resource_name='request_tokens'): {
+        'href': '/OS-OAUTH1/request_token'},
+    _build_oauth1_rel(resource_name='consumers'): {
+        'href': '/OS-OAUTH1/consumers'},
+    _build_oauth1_rel(resource_name='authorize_request_token'):
+    {
+        'href-template': '/OS-OAUTH1/authorize/{request_token_id}',
+        'href-vars': {'request_token_id':
+                      REQUEST_TOKEN_ID_PARAMETER_RELATION, }},
+    _build_oauth1_rel(resource_name='consumer'):
+    {
+        'href-template': '/OS-OAUTH1/consumers/{consumer_id}',
+        'href-vars': {'consumer_id': CONSUMER_ID_PARAMETER_RELATION, }},
+    _build_oauth1_rel(resource_name='user_access_token'):
+    {
+        'href-template': BASE_ACCESS_TOKEN,
+        'href-vars': {'user_id': json_home.Parameters.USER_ID,
+                      'access_token_id':
+                      ACCESS_TOKEN_ID_PARAMETER_RELATION, }},
+    _build_oauth1_rel(resource_name='user_access_tokens'):
+    {
+        'href-template': '/users/{user_id}/OS-OAUTH1/access_tokens',
+        'href-vars': {'user_id': json_home.Parameters.USER_ID, }},
+    _build_oauth1_rel(resource_name='user_access_token_role'):
+    {
+        'href-template': BASE_ACCESS_TOKEN + '/roles/{role_id}',
+        'href-vars': {'user_id': json_home.Parameters.USER_ID,
+                      'role_id': json_home.Parameters.ROLE_ID,
+                      'access_token_id':
+                      ACCESS_TOKEN_ID_PARAMETER_RELATION, }},
+    _build_oauth1_rel(resource_name='user_access_token_roles'):
+    {
+        'href-template': BASE_ACCESS_TOKEN + '/roles',
+        'href-vars': {'user_id': json_home.Parameters.USER_ID,
+                      'access_token_id':
+                      ACCESS_TOKEN_ID_PARAMETER_RELATION, }},
+    _build_ep_policy_rel(resource_name='endpoint_policy'):
+    {
+        'href-template': '/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy',
+        'href-vars': {'endpoint_id': json_home.Parameters.ENDPOINT_ID, }},
+    _build_ep_policy_rel(resource_name='endpoint_policy_association'):
+    {
+        'href-template': BASE_EP_POLICY + '/endpoints/{endpoint_id}',
+        'href-vars': {'endpoint_id': json_home.Parameters.ENDPOINT_ID,
+                      'policy_id': json_home.Parameters.POLICY_ID, }},
+    _build_ep_policy_rel(resource_name='policy_endpoints'):
+    {
+        'href-template': BASE_EP_POLICY + '/endpoints',
+        'href-vars': {'policy_id': json_home.Parameters.POLICY_ID, }},
+    _build_ep_policy_rel(
+        resource_name='region_and_service_policy_association'):
+    {
+        'href-template': (BASE_EP_POLICY +
+                          '/services/{service_id}/regions/{region_id}'),
+        'href-vars': {'policy_id': json_home.Parameters.POLICY_ID,
+                      'service_id': json_home.Parameters.SERVICE_ID,
+                      'region_id': json_home.Parameters.REGION_ID, }},
+    _build_ep_policy_rel(resource_name='service_policy_association'):
+    {
+        'href-template': BASE_EP_POLICY + '/services/{service_id}',
+        'href-vars': {'policy_id': json_home.Parameters.POLICY_ID,
+                      'service_id': json_home.Parameters.SERVICE_ID, }},
+    _build_ep_filter_rel(resource_name='endpoint_group'):
+    {
+        'href-template': '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}',
+        'href-vars': {'endpoint_group_id':
+                      ENDPOINT_GROUP_ID_PARAMETER_RELATION, }},
+    _build_ep_filter_rel(
+        resource_name='endpoint_group_to_project_association'):
+    {
+        'href-template': BASE_EP_FILTER + '/projects/{project_id}',
+        'href-vars': {'endpoint_group_id':
+                      ENDPOINT_GROUP_ID_PARAMETER_RELATION,
+                      'project_id': json_home.Parameters.PROJECT_ID, }},
+    _build_ep_filter_rel(resource_name='endpoint_groups'):
+    {'href': '/OS-EP-FILTER/endpoint_groups'},
+    _build_ep_filter_rel(resource_name='endpoint_projects'):
+    {
+        'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects',
+        'href-vars': {'endpoint_id': json_home.Parameters.ENDPOINT_ID, }},
+    _build_ep_filter_rel(resource_name='endpoints_in_endpoint_group'):
+    {
+        'href-template': BASE_EP_FILTER + '/endpoints',
+        'href-vars': {'endpoint_group_id':
+                      ENDPOINT_GROUP_ID_PARAMETER_RELATION, }},
+    _build_ep_filter_rel(resource_name='project_endpoint'):
+    {
+        'href-template': ('/OS-EP-FILTER/projects/{project_id}'
+                          '/endpoints/{endpoint_id}'),
+        'href-vars': {'endpoint_id': json_home.Parameters.ENDPOINT_ID,
+                      'project_id': json_home.Parameters.PROJECT_ID, }},
+    _build_ep_filter_rel(resource_name='project_endpoints'):
+    {
+        'href-template': '/OS-EP-FILTER/projects/{project_id}/endpoints',
+        'href-vars': {'project_id': json_home.Parameters.PROJECT_ID, }},
+    _build_ep_filter_rel(
+        resource_name='projects_associated_with_endpoint_group'):
+    {
+        'href-template': BASE_EP_FILTER + '/projects',
+        'href-vars': {'endpoint_group_id':
+                      ENDPOINT_GROUP_ID_PARAMETER_RELATION, }},
+    json_home.build_v3_resource_relation('domain_config'): {
+        'href-template':
+        '/domains/{domain_id}/config',
+        'href-vars': {
+            'domain_id': json_home.Parameters.DOMAIN_ID},
+        'hints': {'status': 'experimental'}},
+    json_home.build_v3_resource_relation('domain_config_group'): {
+        'href-template':
+        '/domains/{domain_id}/config/{group}',
+        'href-vars': {
+            'domain_id': json_home.Parameters.DOMAIN_ID,
+            'group': json_home.build_v3_parameter_relation('config_group')},
+        'hints': {'status': 'experimental'}},
+    json_home.build_v3_resource_relation('domain_config_option'): {
+        'href-template':
+        '/domains/{domain_id}/config/{group}/{option}',
+        'href-vars': {
+            'domain_id': json_home.Parameters.DOMAIN_ID,
+            'group': json_home.build_v3_parameter_relation('config_group'),
+            'option': json_home.build_v3_parameter_relation('config_option')},
+        'hints': {'status': 'experimental'}},
+}
+
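+# When rendered, each entry above becomes one member of the JSON Home
+# document's 'resources' object, keyed by the relation URL. A sketch of
+# the 'domain' entry, assuming the core relation builder follows the
+# same docs.openstack.org URL scheme as the extension builders above:
+#
+#   "http://docs.openstack.org/api/openstack-identity/3/rel/domain": {
+#       "href-template": "/domains/{domain_id}",
+#       "href-vars": {"domain_id": "<...>/3/param/domain_id"}}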
+
+# With os-inherit enabled, there are some additional resources.
+
+build_os_inherit_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation,
+    extension_name='OS-INHERIT', extension_version='1.0')
+
+V3_JSON_HOME_RESOURCES_INHERIT_ENABLED = dict(
+    V3_JSON_HOME_RESOURCES_INHERIT_DISABLED)
+V3_JSON_HOME_RESOURCES_INHERIT_ENABLED.update(
+    (
+        (
+            build_os_inherit_relation(
+                resource_name='domain_user_role_inherited_to_projects'),
+            {
+                'href-template': '/OS-INHERIT/domains/{domain_id}/users/'
+                '{user_id}/roles/{role_id}/inherited_to_projects',
+                'href-vars': {
+                    'domain_id': json_home.Parameters.DOMAIN_ID,
+                    'role_id': json_home.Parameters.ROLE_ID,
+                    'user_id': json_home.Parameters.USER_ID,
+                },
+            }
+        ),
+        (
+            build_os_inherit_relation(
+                resource_name='domain_group_role_inherited_to_projects'),
+            {
+                'href-template': '/OS-INHERIT/domains/{domain_id}/groups/'
+                '{group_id}/roles/{role_id}/inherited_to_projects',
+                'href-vars': {
+                    'domain_id': json_home.Parameters.DOMAIN_ID,
+                    'group_id': json_home.Parameters.GROUP_ID,
+                    'role_id': json_home.Parameters.ROLE_ID,
+                },
+            }
+        ),
+        (
+            build_os_inherit_relation(
+                resource_name='domain_user_roles_inherited_to_projects'),
+            {
+                'href-template': '/OS-INHERIT/domains/{domain_id}/users/'
+                '{user_id}/roles/inherited_to_projects',
+                'href-vars': {
+                    'domain_id': json_home.Parameters.DOMAIN_ID,
+                    'user_id': json_home.Parameters.USER_ID,
+                },
+            }
+        ),
+        (
+            build_os_inherit_relation(
+                resource_name='domain_group_roles_inherited_to_projects'),
+            {
+                'href-template': '/OS-INHERIT/domains/{domain_id}/groups/'
+                '{group_id}/roles/inherited_to_projects',
+                'href-vars': {
+                    'domain_id': json_home.Parameters.DOMAIN_ID,
+                    'group_id': json_home.Parameters.GROUP_ID,
+                },
+            }
+        ),
+        (
+            build_os_inherit_relation(
+                resource_name='project_user_role_inherited_to_projects'),
+            {
+                'href-template': '/OS-INHERIT/projects/{project_id}/users/'
+                '{user_id}/roles/{role_id}/inherited_to_projects',
+                'href-vars': {
+                    'project_id': json_home.Parameters.PROJECT_ID,
+                    'role_id': json_home.Parameters.ROLE_ID,
+                    'user_id': json_home.Parameters.USER_ID,
+                },
+            }
+        ),
+        (
+            build_os_inherit_relation(
+                resource_name='project_group_role_inherited_to_projects'),
+            {
+                'href-template': '/OS-INHERIT/projects/{project_id}/groups/'
+                '{group_id}/roles/{role_id}/inherited_to_projects',
+                'href-vars': {
+                    'project_id': json_home.Parameters.PROJECT_ID,
+                    'group_id': json_home.Parameters.GROUP_ID,
+                    'role_id': json_home.Parameters.ROLE_ID,
+                },
+            }
+        ),
+    )
+)
+
+
+class _VersionsEqual(tt_matchers.MatchesListwise):
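+    # Matches a versions response while ignoring the order of the version
+    # entries: the response must have the expected top-level and
+    # 'versions' keys, and 'values' must have the expected length and
+    # contain every expected entry.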
+    def __init__(self, expected):
+        super(_VersionsEqual, self).__init__([
+            tt_matchers.KeysEqual(expected),
+            tt_matchers.KeysEqual(expected['versions']),
+            tt_matchers.HasLength(len(expected['versions']['values'])),
+            tt_matchers.ContainsAll(expected['versions']['values']),
+        ])
+
+    def match(self, other):
+        return super(_VersionsEqual, self).match([
+            other,
+            other['versions'],
+            other['versions']['values'],
+            other['versions']['values'],
+        ])
+
+
+class VersionTestCase(tests.TestCase):
+    def setUp(self):
+        super(VersionTestCase, self).setUp()
+        self.load_backends()
+        self.public_app = self.loadapp('keystone', 'main')
+        self.admin_app = self.loadapp('keystone', 'admin')
+
+        self.config_fixture.config(
+            public_endpoint='http://localhost:%(public_port)d',
+            admin_endpoint='http://localhost:%(admin_port)d')
+
+    def config_overrides(self):
+        super(VersionTestCase, self).config_overrides()
+        port = random.randint(10000, 30000)
+        self.config_fixture.config(group='eventlet_server', public_port=port,
+                                   admin_port=port)
+
+    def _paste_in_port(self, response, port):
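+        # Rewrite the 'self' link in the expected fixture data so it can
+        # be compared against a response from an arbitrary endpoint.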
+        for link in response['links']:
+            if link['rel'] == 'self':
+                link['href'] = port
+
+    def test_public_versions(self):
+        client = tests.TestClient(self.public_app)
+        resp = client.get('/')
+        self.assertEqual(300, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = VERSIONS_RESPONSE
+        for version in expected['versions']['values']:
+            if version['id'] == 'v3.0':
+                self._paste_in_port(
+                    version, 'http://localhost:%s/v3/' %
+                    CONF.eventlet_server.public_port)
+            elif version['id'] == 'v2.0':
+                self._paste_in_port(
+                    version, 'http://localhost:%s/v2.0/' %
+                    CONF.eventlet_server.public_port)
+        self.assertThat(data, _VersionsEqual(expected))
+
+    def test_admin_versions(self):
+        client = tests.TestClient(self.admin_app)
+        resp = client.get('/')
+        self.assertEqual(300, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = VERSIONS_RESPONSE
+        for version in expected['versions']['values']:
+            if version['id'] == 'v3.0':
+                self._paste_in_port(
+                    version, 'http://localhost:%s/v3/' %
+                    CONF.eventlet_server.admin_port)
+            elif version['id'] == 'v2.0':
+                self._paste_in_port(
+                    version, 'http://localhost:%s/v2.0/' %
+                    CONF.eventlet_server.admin_port)
+        self.assertThat(data, _VersionsEqual(expected))
+
+    def test_use_site_url_if_endpoint_unset(self):
+        self.config_fixture.config(public_endpoint=None, admin_endpoint=None)
+
+        for app in (self.public_app, self.admin_app):
+            client = tests.TestClient(app)
+            resp = client.get('/')
+            self.assertEqual(300, resp.status_int)
+            data = jsonutils.loads(resp.body)
+            expected = VERSIONS_RESPONSE
+            for version in expected['versions']['values']:
+                # localhost happens to be the site URL for tests
+                if version['id'] == 'v3.0':
+                    self._paste_in_port(
+                        version, 'http://localhost/v3/')
+                elif version['id'] == 'v2.0':
+                    self._paste_in_port(
+                        version, 'http://localhost/v2.0/')
+            self.assertThat(data, _VersionsEqual(expected))
+
+    def test_public_version_v2(self):
+        client = tests.TestClient(self.public_app)
+        resp = client.get('/v2.0/')
+        self.assertEqual(200, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = v2_VERSION_RESPONSE
+        self._paste_in_port(expected['version'],
+                            'http://localhost:%s/v2.0/' %
+                            CONF.eventlet_server.public_port)
+        self.assertEqual(expected, data)
+
+    def test_admin_version_v2(self):
+        client = tests.TestClient(self.admin_app)
+        resp = client.get('/v2.0/')
+        self.assertEqual(200, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = v2_VERSION_RESPONSE
+        self._paste_in_port(expected['version'],
+                            'http://localhost:%s/v2.0/' %
+                            CONF.eventlet_server.admin_port)
+        self.assertEqual(expected, data)
+
+    def test_use_site_url_if_endpoint_unset_v2(self):
+        self.config_fixture.config(public_endpoint=None, admin_endpoint=None)
+        for app in (self.public_app, self.admin_app):
+            client = tests.TestClient(app)
+            resp = client.get('/v2.0/')
+            self.assertEqual(200, resp.status_int)
+            data = jsonutils.loads(resp.body)
+            expected = v2_VERSION_RESPONSE
+            self._paste_in_port(expected['version'], 'http://localhost/v2.0/')
+            self.assertEqual(expected, data)
+
+    def test_public_version_v3(self):
+        client = tests.TestClient(self.public_app)
+        resp = client.get('/v3/')
+        self.assertEqual(200, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = v3_VERSION_RESPONSE
+        self._paste_in_port(expected['version'],
+                            'http://localhost:%s/v3/' %
+                            CONF.eventlet_server.public_port)
+        self.assertEqual(expected, data)
+
+    def test_admin_version_v3(self):
+        client = tests.TestClient(self.admin_app)
+        resp = client.get('/v3/')
+        self.assertEqual(200, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = v3_VERSION_RESPONSE
+        self._paste_in_port(expected['version'],
+                            'http://localhost:%s/v3/' %
+                            CONF.eventlet_server.admin_port)
+        self.assertEqual(expected, data)
+
+    def test_use_site_url_if_endpoint_unset_v3(self):
+        self.config_fixture.config(public_endpoint=None, admin_endpoint=None)
+        for app in (self.public_app, self.admin_app):
+            client = tests.TestClient(app)
+            resp = client.get('/v3/')
+            self.assertEqual(200, resp.status_int)
+            data = jsonutils.loads(resp.body)
+            expected = v3_VERSION_RESPONSE
+            self._paste_in_port(expected['version'], 'http://localhost/v3/')
+            self.assertEqual(expected, data)
+
+    @mock.patch.object(controllers, '_VERSIONS', ['v3'])
+    def test_v2_disabled(self):
+        client = tests.TestClient(self.public_app)
+        # request to /v2.0 should fail
+        resp = client.get('/v2.0/')
+        self.assertEqual(404, resp.status_int)
+
+        # request to /v3 should pass
+        resp = client.get('/v3/')
+        self.assertEqual(200, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = v3_VERSION_RESPONSE
+        self._paste_in_port(expected['version'],
+                            'http://localhost:%s/v3/' %
+                            CONF.eventlet_server.public_port)
+        self.assertEqual(expected, data)
+
+        # only v3 information should be displayed by requests to /
+        v3_only_response = {
+            "versions": {
+                "values": [
+                    v3_EXPECTED_RESPONSE
+                ]
+            }
+        }
+        self._paste_in_port(v3_only_response['versions']['values'][0],
+                            'http://localhost:%s/v3/' %
+                            CONF.eventlet_server.public_port)
+        resp = client.get('/')
+        self.assertEqual(300, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        self.assertEqual(v3_only_response, data)
+
+    @mock.patch.object(controllers, '_VERSIONS', ['v2.0'])
+    def test_v3_disabled(self):
+        client = tests.TestClient(self.public_app)
+        # request to /v3 should fail
+        resp = client.get('/v3/')
+        self.assertEqual(404, resp.status_int)
+
+        # request to /v2.0 should pass
+        resp = client.get('/v2.0/')
+        self.assertEqual(200, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = v2_VERSION_RESPONSE
+        self._paste_in_port(expected['version'],
+                            'http://localhost:%s/v2.0/' %
+                            CONF.eventlet_server.public_port)
+        self.assertEqual(expected, data)
+
+        # only v2 information should be displayed by requests to /
+        v2_only_response = {
+            "versions": {
+                "values": [
+                    v2_EXPECTED_RESPONSE
+                ]
+            }
+        }
+        self._paste_in_port(v2_only_response['versions']['values'][0],
+                            'http://localhost:%s/v2.0/' %
+                            CONF.eventlet_server.public_port)
+        resp = client.get('/')
+        self.assertEqual(300, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        self.assertEqual(v2_only_response, data)
+
+    def _test_json_home(self, path, exp_json_home_data):
+        client = tests.TestClient(self.public_app)
+        resp = client.get(path, headers={'Accept': 'application/json-home'})
+
+        self.assertThat(resp.status, tt_matchers.Equals('200 OK'))
+        self.assertThat(resp.headers['Content-Type'],
+                        tt_matchers.Equals('application/json-home'))
+
+        self.assertThat(jsonutils.loads(resp.body),
+                        tt_matchers.Equals(exp_json_home_data))
+
+    def test_json_home_v3(self):
+        # If the request is /v3 and the Accept header is application/json-home
+        # then the server responds with a JSON Home document.
+
+        exp_json_home_data = {
+            'resources': V3_JSON_HOME_RESOURCES_INHERIT_DISABLED}
+
+        self._test_json_home('/v3', exp_json_home_data)
+
+    def test_json_home_root(self):
+        # If the request is / and the Accept header is application/json-home
+        # then the server responds with a JSON Home document.
+
+        exp_json_home_data = copy.deepcopy({
+            'resources': V3_JSON_HOME_RESOURCES_INHERIT_DISABLED})
+        json_home.translate_urls(exp_json_home_data, '/v3')
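+        # translate_urls() is expected to prefix each href in the document
+        # with '/v3', since the root advertises the v3 resources.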
+
+        self._test_json_home('/', exp_json_home_data)
+
+    def test_accept_type_handling(self):
+        # Accept headers with multiple types and qvalues are handled.
+
+        def make_request(accept_types=None):
+            client = tests.TestClient(self.public_app)
+            headers = None
+            if accept_types:
+                headers = {'Accept': accept_types}
+            resp = client.get('/v3', headers=headers)
+            self.assertThat(resp.status, tt_matchers.Equals('200 OK'))
+            return resp.headers['Content-Type']
+
+        JSON = controllers.MimeTypes.JSON
+        JSON_HOME = controllers.MimeTypes.JSON_HOME
+
+        JSON_MATCHER = tt_matchers.Equals(JSON)
+        JSON_HOME_MATCHER = tt_matchers.Equals(JSON_HOME)
+
+        # Default is JSON.
+        self.assertThat(make_request(), JSON_MATCHER)
+
+        # Can request JSON and get JSON.
+        self.assertThat(make_request(JSON), JSON_MATCHER)
+
+        # Can request JSON Home and get JSON Home.
+        self.assertThat(make_request(JSON_HOME), JSON_HOME_MATCHER)
+
+        # If JSON and JSON Home are both requested, JSON is returned.
+        accept_types = '%s, %s' % (JSON, JSON_HOME)
+        self.assertThat(make_request(accept_types), JSON_MATCHER)
+
+        # If JSON Home and JSON are both requested, JSON is returned.
+        accept_types = '%s, %s' % (JSON_HOME, JSON)
+        self.assertThat(make_request(accept_types), JSON_MATCHER)
+
+        # If JSON Home and JSON;q=0.5 are requested, JSON Home is returned.
+        accept_types = '%s, %s;q=0.5' % (JSON_HOME, JSON)
+        self.assertThat(make_request(accept_types), JSON_HOME_MATCHER)
+
+        # If an unknown mime-type is requested, JSON is returned.
+        self.assertThat(make_request(self.getUniqueString()), JSON_MATCHER)
+
+    @mock.patch.object(controllers, '_VERSIONS', [])
+    def test_no_json_home_document_returned_when_v3_disabled(self):
+        json_home_document = controllers.request_v3_json_home('some_prefix')
+        expected_document = {'resources': {}}
+        self.assertEqual(expected_document, json_home_document)
+
+    def test_extension_property_method_returns_none(self):
+        extension_obj = controllers.Extensions()
+        extensions_property = extension_obj.extensions
+        self.assertIsNone(extensions_property)
+
+
+class VersionSingleAppTestCase(tests.TestCase):
+    """Tests running with a single application loaded.
+
+    These are important because when Keystone is running in Apache httpd
+    there's only one application loaded for each instance.
+
+    """
+
+    def setUp(self):
+        super(VersionSingleAppTestCase, self).setUp()
+        self.load_backends()
+
+        self.config_fixture.config(
+            public_endpoint='http://localhost:%(public_port)d',
+            admin_endpoint='http://localhost:%(admin_port)d')
+
+    def config_overrides(self):
+        super(VersionSingleAppTestCase, self).config_overrides()
+        port = random.randint(10000, 30000)
+        self.config_fixture.config(group='eventlet_server', public_port=port,
+                                   admin_port=port)
+
+    def _paste_in_port(self, response, port):
+        for link in response['links']:
+            if link['rel'] == 'self':
+                link['href'] = port
+
+    def _test_version(self, app_name):
+        app = self.loadapp('keystone', app_name)
+        client = tests.TestClient(app)
+        resp = client.get('/')
+        self.assertEqual(300, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = VERSIONS_RESPONSE
+        for version in expected['versions']['values']:
+            if version['id'] == 'v3.0':
+                self._paste_in_port(
+                    version, 'http://localhost:%s/v3/' %
+                    CONF.eventlet_server.public_port)
+            elif version['id'] == 'v2.0':
+                self._paste_in_port(
+                    version, 'http://localhost:%s/v2.0/' %
+                    CONF.eventlet_server.public_port)
+        self.assertThat(data, _VersionsEqual(expected))
+
+    def test_public(self):
+        self._test_version('main')
+
+    def test_admin(self):
+        self._test_version('admin')
+
+
+class VersionInheritEnabledTestCase(tests.TestCase):
+    def setUp(self):
+        super(VersionInheritEnabledTestCase, self).setUp()
+        self.load_backends()
+        self.public_app = self.loadapp('keystone', 'main')
+        self.admin_app = self.loadapp('keystone', 'admin')
+
+        self.config_fixture.config(
+            public_endpoint='http://localhost:%(public_port)d',
+            admin_endpoint='http://localhost:%(admin_port)d')
+
+    def config_overrides(self):
+        super(VersionInheritEnabledTestCase, self).config_overrides()
+        port = random.randint(10000, 30000)
+        self.config_fixture.config(group='eventlet_server', public_port=port,
+                                   admin_port=port)
+
+        self.config_fixture.config(group='os_inherit', enabled=True)
+
+    def test_json_home_v3(self):
+        # If the request is /v3 and the Accept header is application/json-home
+        # then the server responds with a JSON Home document.
+
+        client = tests.TestClient(self.public_app)
+        resp = client.get('/v3/', headers={'Accept': 'application/json-home'})
+
+        self.assertThat(resp.status, tt_matchers.Equals('200 OK'))
+        self.assertThat(resp.headers['Content-Type'],
+                        tt_matchers.Equals('application/json-home'))
+
+        exp_json_home_data = {
+            'resources': V3_JSON_HOME_RESOURCES_INHERIT_ENABLED}
+
+        self.assertThat(jsonutils.loads(resp.body),
+                        tt_matchers.Equals(exp_json_home_data))
+
+
+class VersionBehindSslTestCase(tests.TestCase):
+    def setUp(self):
+        super(VersionBehindSslTestCase, self).setUp()
+        self.load_backends()
+        self.public_app = self.loadapp('keystone', 'main')
+
+    def config_overrides(self):
+        super(VersionBehindSslTestCase, self).config_overrides()
+        self.config_fixture.config(
+            secure_proxy_ssl_header='HTTP_X_FORWARDED_PROTO')
+
+    def _paste_in_port(self, response, port):
+        for link in response['links']:
+            if link['rel'] == 'self':
+                link['href'] = port
+
+    def _get_expected(self, host):
+        expected = VERSIONS_RESPONSE
+        for version in expected['versions']['values']:
+            if version['id'] == 'v3.0':
+                self._paste_in_port(version, host + 'v3/')
+            elif version['id'] == 'v2.0':
+                self._paste_in_port(version, host + 'v2.0/')
+        return expected
+
+    def test_versions_without_headers(self):
+        client = tests.TestClient(self.public_app)
+        host_name = 'host-%d' % random.randint(10, 30)
+        host_port = random.randint(10000, 30000)
+        host = 'http://%s:%s/' % (host_name, host_port)
+        resp = client.get(host)
+        self.assertEqual(300, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = self._get_expected(host)
+        self.assertThat(data, _VersionsEqual(expected))
+
+    def test_versions_with_header(self):
+        client = tests.TestClient(self.public_app)
+        host_name = 'host-%d' % random.randint(10, 30)
+        host_port = random.randint(10000, 30000)
+        resp = client.get('http://%s:%s/' % (host_name, host_port),
+                          headers={'X-Forwarded-Proto': 'https'})
+        self.assertEqual(300, resp.status_int)
+        data = jsonutils.loads(resp.body)
+        expected = self._get_expected('https://%s:%s/' % (host_name,
+                                                          host_port))
+        self.assertThat(data, _VersionsEqual(expected))
diff --git a/keystone-moon/keystone/tests/unit/test_wsgi.py b/keystone-moon/keystone/tests/unit/test_wsgi.py
new file mode 100644 (file)
index 0000000..1785dd0
--- /dev/null
@@ -0,0 +1,431 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import gettext
+import socket
+import uuid
+
+import mock
+import oslo_i18n
+from oslo_serialization import jsonutils
+import six
+from testtools import matchers
+import webob
+
+from keystone.common import environment
+from keystone.common import wsgi
+from keystone import exception
+from keystone.tests import unit as tests
+
+
+class FakeApp(wsgi.Application):
+    def index(self, context):
+        return {'a': 'b'}
+
+
+class FakeAttributeCheckerApp(wsgi.Application):
+    def index(self, context):
+        return context['query_string']
+
+    def assert_attribute(self, body, attr):
+        """Asserts that the given request has a certain attribute."""
+        ref = jsonutils.loads(body)
+        self._require_attribute(ref, attr)
+
+    def assert_attributes(self, body, attr):
+        """Asserts that the given request has a certain set attributes."""
+        ref = jsonutils.loads(body)
+        self._require_attributes(ref, attr)
+
+
+class BaseWSGITest(tests.TestCase):
+    def setUp(self):
+        self.app = FakeApp()
+        super(BaseWSGITest, self).setUp()
+
+    def _make_request(self, url='/'):
+        req = webob.Request.blank(url)
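+        # Simulate what the routes middleware would normally set so that
+        # the Application dispatches the request to its index() method.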
+        args = {'action': 'index', 'controller': None}
+        req.environ['wsgiorg.routing_args'] = [None, args]
+        return req
+
+
+class ApplicationTest(BaseWSGITest):
+    def test_response_content_type(self):
+        req = self._make_request()
+        resp = req.get_response(self.app)
+        self.assertEqual(resp.content_type, 'application/json')
+
+    def test_query_string_available(self):
+        class FakeApp(wsgi.Application):
+            def index(self, context):
+                return context['query_string']
+        req = self._make_request(url='/?1=2')
+        resp = req.get_response(FakeApp())
+        self.assertEqual(jsonutils.loads(resp.body), {'1': '2'})
+
+    def test_headers_available(self):
+        class FakeApp(wsgi.Application):
+            def index(self, context):
+                return context['headers']
+
+        app = FakeApp()
+        req = self._make_request(url='/?1=2')
+        req.headers['X-Foo'] = "bar"
+        resp = req.get_response(app)
+        self.assertIn('X-Foo', jsonutils.loads(resp.body))
+
+    def test_render_response(self):
+        data = {'attribute': 'value'}
+        body = b'{"attribute": "value"}'
+
+        resp = wsgi.render_response(body=data)
+        self.assertEqual('200 OK', resp.status)
+        self.assertEqual(200, resp.status_int)
+        self.assertEqual(body, resp.body)
+        self.assertEqual('X-Auth-Token', resp.headers.get('Vary'))
+        self.assertEqual(str(len(body)), resp.headers.get('Content-Length'))
+
+    def test_render_response_custom_status(self):
+        resp = wsgi.render_response(status=(501, 'Not Implemented'))
+        self.assertEqual('501 Not Implemented', resp.status)
+        self.assertEqual(501, resp.status_int)
+
+    def test_successful_require_attribute(self):
+        app = FakeAttributeCheckerApp()
+        req = self._make_request(url='/?1=2')
+        resp = req.get_response(app)
+        app.assert_attribute(resp.body, '1')
+
+    def test_require_attribute_fail_if_attribute_not_present(self):
+        app = FakeAttributeCheckerApp()
+        req = self._make_request(url='/?1=2')
+        resp = req.get_response(app)
+        self.assertRaises(exception.ValidationError,
+                          app.assert_attribute, resp.body, 'a')
+
+    def test_successful_require_multiple_attributes(self):
+        app = FakeAttributeCheckerApp()
+        req = self._make_request(url='/?a=1&b=2')
+        resp = req.get_response(app)
+        app.assert_attributes(resp.body, ['a', 'b'])
+
+    def test_attribute_missing_from_request(self):
+        app = FakeAttributeCheckerApp()
+        req = self._make_request(url='/?a=1&b=2')
+        resp = req.get_response(app)
+        ex = self.assertRaises(exception.ValidationError,
+                               app.assert_attributes,
+                               resp.body, ['a', 'missing_attribute'])
+        self.assertThat(six.text_type(ex),
+                        matchers.Contains('missing_attribute'))
+
+    def test_no_required_attributes_present(self):
+        app = FakeAttributeCheckerApp()
+        req = self._make_request(url='/')
+        resp = req.get_response(app)
+
+        ex = self.assertRaises(exception.ValidationError,
+                               app.assert_attributes, resp.body,
+                               ['missing_attribute1', 'missing_attribute2'])
+        self.assertThat(six.text_type(ex),
+                        matchers.Contains('missing_attribute1'))
+        self.assertThat(six.text_type(ex),
+                        matchers.Contains('missing_attribute2'))
+
+    def test_render_response_custom_headers(self):
+        resp = wsgi.render_response(headers=[('Custom-Header', 'Some-Value')])
+        self.assertEqual('Some-Value', resp.headers.get('Custom-Header'))
+        self.assertEqual('X-Auth-Token', resp.headers.get('Vary'))
+
+    def test_render_response_no_body(self):
+        resp = wsgi.render_response()
+        self.assertEqual('204 No Content', resp.status)
+        self.assertEqual(204, resp.status_int)
+        self.assertEqual(b'', resp.body)
+        self.assertEqual('0', resp.headers.get('Content-Length'))
+        self.assertIsNone(resp.headers.get('Content-Type'))
+
+    def test_render_response_head_with_body(self):
+        resp = wsgi.render_response({'id': uuid.uuid4().hex}, method='HEAD')
+        self.assertEqual(200, resp.status_int)
+        self.assertEqual(b'', resp.body)
+        self.assertNotEqual(resp.headers.get('Content-Length'), '0')
+        self.assertEqual('application/json', resp.headers.get('Content-Type'))
+
+    def test_application_local_config(self):
+        class FakeApp(wsgi.Application):
+            def __init__(self, *args, **kwargs):
+                self.kwargs = kwargs
+
+        app = FakeApp.factory({}, testkey="test")
+        self.assertIn("testkey", app.kwargs)
+        self.assertEqual("test", app.kwargs["testkey"])
+
+    def test_render_exception(self):
+        e = exception.Unauthorized(message=u'\u7f51\u7edc')
+        resp = wsgi.render_exception(e)
+        self.assertEqual(401, resp.status_int)
+
+    def test_render_exception_host(self):
+        e = exception.Unauthorized(message=u'\u7f51\u7edc')
+        context = {'host_url': 'http://%s:5000' % uuid.uuid4().hex}
+        resp = wsgi.render_exception(e, context=context)
+
+        self.assertEqual(401, resp.status_int)
+
+
+class ExtensionRouterTest(BaseWSGITest):
+    def test_extensionrouter_local_config(self):
+        class FakeRouter(wsgi.ExtensionRouter):
+            def __init__(self, *args, **kwargs):
+                self.kwargs = kwargs
+
+        factory = FakeRouter.factory({}, testkey="test")
+        app = factory(self.app)
+        self.assertIn("testkey", app.kwargs)
+        self.assertEqual("test", app.kwargs["testkey"])
+
+
+class MiddlewareTest(BaseWSGITest):
+    def test_middleware_request(self):
+        class FakeMiddleware(wsgi.Middleware):
+            def process_request(self, req):
+                req.environ['fake_request'] = True
+                return req
+        req = self._make_request()
+        resp = FakeMiddleware(None)(req)
+        self.assertIn('fake_request', resp.environ)
+
+    def test_middleware_response(self):
+        class FakeMiddleware(wsgi.Middleware):
+            def process_response(self, request, response):
+                response.environ = {}
+                response.environ['fake_response'] = True
+                return response
+        req = self._make_request()
+        resp = FakeMiddleware(self.app)(req)
+        self.assertIn('fake_response', resp.environ)
+
+    def test_middleware_bad_request(self):
+        class FakeMiddleware(wsgi.Middleware):
+            def process_response(self, request, response):
+                raise exception.Unauthorized()
+
+        req = self._make_request()
+        req.environ['REMOTE_ADDR'] = '127.0.0.1'
+        resp = FakeMiddleware(self.app)(req)
+        self.assertEqual(exception.Unauthorized.code, resp.status_int)
+
+    def test_middleware_type_error(self):
+        class FakeMiddleware(wsgi.Middleware):
+            def process_response(self, request, response):
+                raise TypeError()
+
+        req = self._make_request()
+        req.environ['REMOTE_ADDR'] = '127.0.0.1'
+        resp = FakeMiddleware(self.app)(req)
+        # A TypeError is rendered with the ValidationError status code.
+        self.assertEqual(exception.ValidationError.code, resp.status_int)
+
+    def test_middleware_exception_error(self):
+
+        exception_str = b'EXCEPTIONERROR'
+
+        class FakeMiddleware(wsgi.Middleware):
+            def process_response(self, request, response):
+                raise exception.UnexpectedError(exception_str)
+
+        def do_request():
+            req = self._make_request()
+            resp = FakeMiddleware(self.app)(req)
+            self.assertEqual(exception.UnexpectedError.code, resp.status_int)
+            return resp
+
+        # Exception data should not be in the message when debug is False
+        self.config_fixture.config(debug=False)
+        self.assertNotIn(exception_str, do_request().body)
+
+        # Exception data should be in the message when debug is True
+        self.config_fixture.config(debug=True)
+        self.assertIn(exception_str, do_request().body)
+
+    def test_middleware_local_config(self):
+        class FakeMiddleware(wsgi.Middleware):
+            def __init__(self, *args, **kwargs):
+                self.kwargs = kwargs
+
+        factory = FakeMiddleware.factory({}, testkey="test")
+        app = factory(self.app)
+        self.assertIn("testkey", app.kwargs)
+        self.assertEqual("test", app.kwargs["testkey"])
+
+
+class LocalizedResponseTest(tests.TestCase):
+    def test_request_match_default(self):
+        # If no Accept-Language header is provided, the best match is None.
+        req = webob.Request.blank('/')
+        self.assertIsNone(wsgi.best_match_language(req))
+
+    @mock.patch.object(oslo_i18n, 'get_available_languages')
+    def test_request_match_language_expected(self, mock_gal):
+        # If Accept-Language is a supported language, best_match_language()
+        # returns it.
+
+        language = uuid.uuid4().hex
+        mock_gal.return_value = [language]
+
+        req = webob.Request.blank('/', headers={'Accept-Language': language})
+        self.assertEqual(language, wsgi.best_match_language(req))
+
+    @mock.patch.object(oslo_i18n, 'get_available_languages')
+    def test_request_match_language_unexpected(self, mock_gal):
+        # If Accept-Language is a language we do not support,
+        # best_match_language() returns None.
+
+        supported_language = uuid.uuid4().hex
+        mock_gal.return_value = [supported_language]
+
+        request_language = uuid.uuid4().hex
+        req = webob.Request.blank(
+            '/', headers={'Accept-Language': request_language})
+        self.assertIsNone(wsgi.best_match_language(req))
+
+    def test_static_translated_string_is_lazy_translatable(self):
+        # Statically created message strings are objects that can be
+        # lazy-translated, rather than regular strings.
+        self.assertNotEqual(type(exception.Unauthorized.message_format),
+                            six.text_type)
+
+    @mock.patch.object(oslo_i18n, 'get_available_languages')
+    def test_get_localized_response(self, mock_gal):
+        # If the request has the Accept-Language set to a supported language
+        # and an exception is raised by the application that is translatable
+        # then the response will have the translated message.
+
+        language = uuid.uuid4().hex
+        mock_gal.return_value = [language]
+
+        # The arguments for the xlated message format have to match the args
+        # for the chosen exception (exception.NotFound)
+        xlated_msg_fmt = "Xlated NotFound, %(target)s."
+
+        # Fake out gettext.translation() to return a translator for our
+        # expected language and a passthrough translator for other langs.
+
+        def fake_translation(*args, **kwargs):
+            class IdentityTranslator(object):
+                def ugettext(self, msgid):
+                    return msgid
+
+                gettext = ugettext
+
+            class LangTranslator(object):
+                def ugettext(self, msgid):
+                    if msgid == exception.NotFound.message_format:
+                        return xlated_msg_fmt
+                    return msgid
+
+                gettext = ugettext
+
+            if language in kwargs.get('languages', []):
+                return LangTranslator()
+            return IdentityTranslator()
+
+        with mock.patch.object(gettext, 'translation',
+                               side_effect=fake_translation) as xlation_mock:
+            target = uuid.uuid4().hex
+
+            # Fake app raises NotFound exception to simulate Keystone raising.
+
+            class FakeApp(wsgi.Application):
+                def index(self, context):
+                    raise exception.NotFound(target=target)
+
+            # Make the request with the Accept-Language header set and
+            # expect an error response containing the translated message.
+
+            req = webob.Request.blank('/')
+            args = {'action': 'index', 'controller': None}
+            req.environ['wsgiorg.routing_args'] = [None, args]
+            req.headers['Accept-Language'] = language
+            resp = req.get_response(FakeApp())
+
+            # Assert that the translated message appears in the response.
+
+            exp_msg = xlated_msg_fmt % dict(target=target)
+            self.assertThat(resp.json['error']['message'],
+                            matchers.Equals(exp_msg))
+            self.assertThat(xlation_mock.called, matchers.Equals(True))
+
+
+class ServerTest(tests.TestCase):
+
+    def setUp(self):
+        super(ServerTest, self).setUp()
+        self.host = '127.0.0.1'
+        self.port = '1234'
+
+    @mock.patch('eventlet.listen')
+    @mock.patch('socket.getaddrinfo')
+    def test_keepalive_unset(self, mock_getaddrinfo, mock_listen):
+        mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
+        mock_sock_dup = mock_listen.return_value.dup.return_value
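+        # The server dup()s the listening socket; keepalive options, if
+        # any, are set on the duplicate, so assertions target it.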
+
+        server = environment.Server(mock.MagicMock(), host=self.host,
+                                    port=self.port)
+        server.start()
+        self.addCleanup(server.stop)
+        self.assertTrue(mock_listen.called)
+        self.assertFalse(mock_sock_dup.setsockopt.called)
+
+    @mock.patch('eventlet.listen')
+    @mock.patch('socket.getaddrinfo')
+    def test_keepalive_set(self, mock_getaddrinfo, mock_listen):
+        mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
+        mock_sock_dup = mock_listen.return_value.dup.return_value
+
+        server = environment.Server(mock.MagicMock(), host=self.host,
+                                    port=self.port, keepalive=True)
+        server.start()
+        self.addCleanup(server.stop)
+        mock_sock_dup.setsockopt.assert_called_once_with(socket.SOL_SOCKET,
+                                                         socket.SO_KEEPALIVE,
+                                                         1)
+        self.assertTrue(mock_listen.called)
+
+    @mock.patch('eventlet.listen')
+    @mock.patch('socket.getaddrinfo')
+    def test_keepalive_and_keepidle_set(self, mock_getaddrinfo, mock_listen):
+        mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
+        mock_sock_dup = mock_listen.return_value.dup.return_value
+
+        server = environment.Server(mock.MagicMock(), host=self.host,
+                                    port=self.port, keepalive=True,
+                                    keepidle=1)
+        server.start()
+        self.addCleanup(server.stop)
+
+        self.assertEqual(2, mock_sock_dup.setsockopt.call_count)
+
+        # Test the last set of call args i.e. for the keepidle
+        mock_sock_dup.setsockopt.assert_called_with(socket.IPPROTO_TCP,
+                                                    socket.TCP_KEEPIDLE,
+                                                    1)
+
+        self.assertTrue(mock_listen.called)
diff --git a/keystone-moon/keystone/tests/unit/tests/__init__.py b/keystone-moon/keystone/tests/unit/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/unit/tests/test_core.py b/keystone-moon/keystone/tests/unit/tests/test_core.py
new file mode 100644 (file)
index 0000000..86c91a8
--- /dev/null
@@ -0,0 +1,62 @@
+# Copyright 2014 IBM Corp.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License"); you may
+#   not use this file except in compliance with the License. You may obtain
+#   a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#   License for the specific language governing permissions and limitations
+#   under the License.
+
+import sys
+import warnings
+
+from oslo_log import log
+from sqlalchemy import exc
+from testtools import matchers
+
+from keystone.tests import unit as tests
+
+
+LOG = log.getLogger(__name__)
+
+
+class BaseTestTestCase(tests.BaseTestCase):
+
+    def test_unexpected_exit(self):
+        # If a test calls sys.exit(), it raises rather than exiting.
+        self.assertThat(lambda: sys.exit(),
+                        matchers.raises(tests.UnexpectedExit))
+
+
+class TestTestCase(tests.TestCase):
+
+    def test_bad_log(self):
+        # If the arguments do not match the placeholders in a log format
+        # string, an exception is raised during testing.
+        self.assertThat(
+            lambda: LOG.warn('String %(p1)s %(p2)s', {'p1': 'something'}),
+            matchers.raises(tests.BadLog))
+
+    def test_sa_warning(self):
+        self.assertThat(
+            lambda: warnings.warn('test sa warning error', exc.SAWarning),
+            matchers.raises(exc.SAWarning))
+
+    def test_deprecations(self):
+        # If any deprecation warning occurs during testing, it is raised as
+        # an exception.
+
+        def use_deprecated():
+            # DeprecationWarning: BaseException.message has been deprecated as
+            # of Python 2.6
+            try:
+                raise Exception('something')
+            except Exception as e:
+                e.message
+
+        self.assertThat(use_deprecated, matchers.raises(DeprecationWarning))
diff --git a/keystone-moon/keystone/tests/unit/tests/test_utils.py b/keystone-moon/keystone/tests/unit/tests/test_utils.py
new file mode 100644 (file)
index 0000000..22c485c
--- /dev/null
@@ -0,0 +1,37 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from testtools import matchers
+from testtools import testcase
+
+from keystone.tests.unit import utils
+
+
+class TestWipDecorator(testcase.TestCase):
+
+    def test_raises_TestSkipped_when_broken_test_fails(self):
+
+        @utils.wip('waiting on bug #000000')
+        def test():
+            raise Exception('I expected a failure - this is a WIP')
+
+        e = self.assertRaises(testcase.TestSkipped, test)
+        self.assertThat(str(e), matchers.Contains('#000000'))
+
+    def test_raises_AssertionError_when_test_passes(self):
+
+        @utils.wip('waiting on bug #000000')
+        def test():
+            pass  # literally
+
+        e = self.assertRaises(AssertionError, test)
+        self.assertThat(str(e), matchers.Contains('#000000'))
diff --git a/keystone-moon/keystone/tests/unit/token/__init__.py b/keystone-moon/keystone/tests/unit/token/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py b/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py
new file mode 100644 (file)
index 0000000..23fc021
--- /dev/null
@@ -0,0 +1,183 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo_utils import timeutils
+
+from keystone.common import config
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.tests.unit import ksfixtures
+from keystone.token import provider
+from keystone.token.providers import fernet
+from keystone.token.providers.fernet import token_formatters
+
+
+CONF = config.CONF
+
+
+class TestFernetTokenProvider(tests.TestCase):
+    def setUp(self):
+        super(TestFernetTokenProvider, self).setUp()
+        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
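+        # The KeyRepository fixture stands up a throwaway Fernet key
+        # repository so the provider has keys to encrypt and decrypt
+        # payloads during the test run.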
+        self.provider = fernet.Provider()
+
+    def test_get_token_id_raises_not_implemented(self):
+        """Test that an exception is raised when calling _get_token_id."""
+        token_data = {}
+        self.assertRaises(exception.NotImplemented,
+                          self.provider._get_token_id, token_data)
+
+    def test_invalid_v3_token_raises_401(self):
+        self.assertRaises(
+            exception.Unauthorized,
+            self.provider.validate_v3_token,
+            uuid.uuid4().hex)
+
+    def test_invalid_v2_token_raises_401(self):
+        self.assertRaises(
+            exception.Unauthorized,
+            self.provider.validate_v2_token,
+            uuid.uuid4().hex)
+
+
+class TestPayloads(tests.TestCase):
+    def test_uuid_hex_to_byte_conversions(self):
+        payload_cls = token_formatters.BasePayload
+
+        expected_hex_uuid = uuid.uuid4().hex
+        uuid_obj = uuid.UUID(expected_hex_uuid)
+        expected_uuid_in_bytes = uuid_obj.bytes
+        actual_uuid_in_bytes = payload_cls.convert_uuid_hex_to_bytes(
+            expected_hex_uuid)
+        self.assertEqual(expected_uuid_in_bytes, actual_uuid_in_bytes)
+        actual_hex_uuid = payload_cls.convert_uuid_bytes_to_hex(
+            expected_uuid_in_bytes)
+        self.assertEqual(expected_hex_uuid, actual_hex_uuid)
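+        # Packing the 32-character hex form into 16 raw bytes halves the
+        # space a uuid occupies in a payload, which is presumably why the
+        # Fernet formatters ship uuids as bytes.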
+
+    def test_time_string_to_int_conversions(self):
+        payload_cls = token_formatters.BasePayload
+
+        expected_time_str = timeutils.isotime()
+        time_obj = timeutils.parse_isotime(expected_time_str)
+        expected_time_int = (
+            (timeutils.normalize_time(time_obj) -
+             datetime.datetime.utcfromtimestamp(0)).total_seconds())
+
+        actual_time_int = payload_cls._convert_time_string_to_int(
+            expected_time_str)
+        self.assertEqual(expected_time_int, actual_time_int)
+
+        actual_time_str = payload_cls._convert_int_to_time_string(
+            actual_time_int)
+        self.assertEqual(expected_time_str, actual_time_str)
+
+    def test_unscoped_payload(self):
+        exp_user_id = uuid.uuid4().hex
+        exp_methods = ['password']
+        exp_expires_at = timeutils.isotime(timeutils.utcnow())
+        exp_audit_ids = [provider.random_urlsafe_str()]
+
+        payload = token_formatters.UnscopedPayload.assemble(
+            exp_user_id, exp_methods, exp_expires_at, exp_audit_ids)
+
+        (user_id, methods, expires_at, audit_ids) = (
+            token_formatters.UnscopedPayload.disassemble(payload))
+
+        self.assertEqual(exp_user_id, user_id)
+        self.assertEqual(exp_methods, methods)
+        self.assertEqual(exp_expires_at, expires_at)
+        self.assertEqual(exp_audit_ids, audit_ids)
+
+    def test_project_scoped_payload(self):
+        exp_user_id = uuid.uuid4().hex
+        exp_methods = ['password']
+        exp_project_id = uuid.uuid4().hex
+        exp_expires_at = timeutils.isotime(timeutils.utcnow())
+        exp_audit_ids = [provider.random_urlsafe_str()]
+
+        payload = token_formatters.ProjectScopedPayload.assemble(
+            exp_user_id, exp_methods, exp_project_id, exp_expires_at,
+            exp_audit_ids)
+
+        (user_id, methods, project_id, expires_at, audit_ids) = (
+            token_formatters.ProjectScopedPayload.disassemble(payload))
+
+        self.assertEqual(exp_user_id, user_id)
+        self.assertEqual(exp_methods, methods)
+        self.assertEqual(exp_project_id, project_id)
+        self.assertEqual(exp_expires_at, expires_at)
+        self.assertEqual(exp_audit_ids, audit_ids)
+
+    def test_domain_scoped_payload(self):
+        exp_user_id = uuid.uuid4().hex
+        exp_methods = ['password']
+        exp_domain_id = uuid.uuid4().hex
+        exp_expires_at = timeutils.isotime(timeutils.utcnow())
+        exp_audit_ids = [provider.random_urlsafe_str()]
+
+        payload = token_formatters.DomainScopedPayload.assemble(
+            exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
+            exp_audit_ids)
+
+        (user_id, methods, domain_id, expires_at, audit_ids) = (
+            token_formatters.DomainScopedPayload.disassemble(payload))
+
+        self.assertEqual(exp_user_id, user_id)
+        self.assertEqual(exp_methods, methods)
+        self.assertEqual(exp_domain_id, domain_id)
+        self.assertEqual(exp_expires_at, expires_at)
+        self.assertEqual(exp_audit_ids, audit_ids)
+
+    def test_domain_scoped_payload_with_default_domain(self):
+        exp_user_id = uuid.uuid4().hex
+        exp_methods = ['password']
+        exp_domain_id = CONF.identity.default_domain_id
+        exp_expires_at = timeutils.isotime(timeutils.utcnow())
+        exp_audit_ids = [provider.random_urlsafe_str()]
+
+        payload = token_formatters.DomainScopedPayload.assemble(
+            exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
+            exp_audit_ids)
+
+        (user_id, methods, domain_id, expires_at, audit_ids) = (
+            token_formatters.DomainScopedPayload.disassemble(payload))
+
+        self.assertEqual(exp_user_id, user_id)
+        self.assertEqual(exp_methods, methods)
+        self.assertEqual(exp_domain_id, domain_id)
+        self.assertEqual(exp_expires_at, expires_at)
+        self.assertEqual(exp_audit_ids, audit_ids)
+
+    def test_trust_scoped_payload(self):
+        exp_user_id = uuid.uuid4().hex
+        exp_methods = ['password']
+        exp_project_id = uuid.uuid4().hex
+        exp_expires_at = timeutils.isotime(timeutils.utcnow())
+        exp_audit_ids = [provider.random_urlsafe_str()]
+        exp_trust_id = uuid.uuid4().hex
+
+        payload = token_formatters.TrustScopedPayload.assemble(
+            exp_user_id, exp_methods, exp_project_id, exp_expires_at,
+            exp_audit_ids, exp_trust_id)
+
+        (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
+            token_formatters.TrustScopedPayload.disassemble(payload))
+
+        self.assertEqual(exp_user_id, user_id)
+        self.assertEqual(exp_methods, methods)
+        self.assertEqual(exp_project_id, project_id)
+        self.assertEqual(exp_expires_at, expires_at)
+        self.assertEqual(exp_audit_ids, audit_ids)
+        self.assertEqual(exp_trust_id, trust_id)
diff --git a/keystone-moon/keystone/tests/unit/token/test_provider.py b/keystone-moon/keystone/tests/unit/token/test_provider.py
new file mode 100644 (file)
index 0000000..e591069
--- /dev/null
@@ -0,0 +1,29 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from keystone.tests import unit
+from keystone.token import provider
+
+
+class TestRandomStrings(unit.BaseTestCase):
+    def test_strings_are_url_safe(self):
+        s = provider.random_urlsafe_str()
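+        # quote_plus leaves the string untouched because it is drawn from
+        # the URL-safe base64 alphabet and needs no escaping.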
+        self.assertEqual(s, urllib.quote_plus(s))
+
+    def test_strings_can_be_converted_to_bytes(self):
+        s = provider.random_urlsafe_str()
+        self.assertIsInstance(s, basestring)
+
+        b = provider.random_urlsafe_str_to_bytes(s)
+        self.assertIsInstance(b, bytes)
diff --git a/keystone-moon/keystone/tests/unit/token/test_token_data_helper.py b/keystone-moon/keystone/tests/unit/token/test_token_data_helper.py
new file mode 100644 (file)
index 0000000..a12a22d
--- /dev/null
@@ -0,0 +1,55 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import uuid
+
+from testtools import matchers
+
+from keystone import exception
+from keystone.tests import unit as tests
+from keystone.token.providers import common
+
+
+class TestTokenDataHelper(tests.TestCase):
+    def setUp(self):
+        super(TestTokenDataHelper, self).setUp()
+        self.load_backends()
+        self.v3_data_helper = common.V3TokenDataHelper()
+
+    def test_v3_token_data_helper_populate_audit_info_string(self):
+        token_data = {}
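+        # urlsafe_b64encode of a 16-byte uuid yields 22 characters plus
+        # '==' padding; stripping the padding below matches the format of
+        # audit ids produced by provider.random_urlsafe_str().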
+        audit_info = base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]
+        self.v3_data_helper._populate_audit_info(token_data, audit_info)
+        self.assertIn(audit_info, token_data['audit_ids'])
+        self.assertThat(token_data['audit_ids'], matchers.HasLength(2))
+
+    def test_v3_token_data_helper_populate_audit_info_none(self):
+        token_data = {}
+        self.v3_data_helper._populate_audit_info(token_data, audit_info=None)
+        self.assertThat(token_data['audit_ids'], matchers.HasLength(1))
+        self.assertNotIn(None, token_data['audit_ids'])
+
+    def test_v3_token_data_helper_populate_audit_info_list(self):
+        token_data = {}
+        audit_info = [base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2],
+                      base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]]
+        self.v3_data_helper._populate_audit_info(token_data, audit_info)
+        self.assertEqual(audit_info, token_data['audit_ids'])
+
+    def test_v3_token_data_helper_populate_audit_info_invalid(self):
+        token_data = {}
+        audit_info = dict()
+        self.assertRaises(exception.UnexpectedError,
+                          self.v3_data_helper._populate_audit_info,
+                          token_data=token_data,
+                          audit_info=audit_info)
diff --git a/keystone-moon/keystone/tests/unit/token/test_token_model.py b/keystone-moon/keystone/tests/unit/token/test_token_model.py
new file mode 100644 (file)
index 0000000..b247428
--- /dev/null
@@ -0,0 +1,262 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from oslo_config import cfg
+from oslo_utils import timeutils
+
+from keystone import exception
+from keystone.models import token_model
+from keystone.tests.unit import core
+from keystone.tests.unit import test_token_provider
+
+
+CONF = cfg.CONF
+
+
+class TestKeystoneTokenModel(core.TestCase):
+    def setUp(self):
+        super(TestKeystoneTokenModel, self).setUp()
+        self.load_backends()
+        self.v2_sample_token = copy.deepcopy(
+            test_token_provider.SAMPLE_V2_TOKEN)
+        self.v3_sample_token = copy.deepcopy(
+            test_token_provider.SAMPLE_V3_TOKEN)
+
+    def test_token_model_v3(self):
+        token_data = token_model.KeystoneToken(uuid.uuid4().hex,
+                                               self.v3_sample_token)
+        self.assertIs(token_model.V3, token_data.version)
+        expires = timeutils.normalize_time(timeutils.parse_isotime(
+            self.v3_sample_token['token']['expires_at']))
+        issued = timeutils.normalize_time(timeutils.parse_isotime(
+            self.v3_sample_token['token']['issued_at']))
+        self.assertEqual(expires, token_data.expires)
+        self.assertEqual(issued, token_data.issued)
+        self.assertEqual(self.v3_sample_token['token']['user']['id'],
+                         token_data.user_id)
+        self.assertEqual(self.v3_sample_token['token']['user']['name'],
+                         token_data.user_name)
+        self.assertEqual(self.v3_sample_token['token']['user']['domain']['id'],
+                         token_data.user_domain_id)
+        self.assertEqual(
+            self.v3_sample_token['token']['user']['domain']['name'],
+            token_data.user_domain_name)
+        self.assertEqual(
+            self.v3_sample_token['token']['project']['domain']['id'],
+            token_data.project_domain_id)
+        self.assertEqual(
+            self.v3_sample_token['token']['project']['domain']['name'],
+            token_data.project_domain_name)
+        self.assertEqual(self.v3_sample_token['token']['OS-TRUST:trust']['id'],
+                         token_data.trust_id)
+        self.assertEqual(
+            self.v3_sample_token['token']['OS-TRUST:trust']['trustor_user_id'],
+            token_data.trustor_user_id)
+        self.assertEqual(
+            self.v3_sample_token['token']['OS-TRUST:trust']['trustee_user_id'],
+            token_data.trustee_user_id)
+        # Project Scoped Token
+        self.assertRaises(exception.UnexpectedError, getattr, token_data,
+                          'domain_id')
+        self.assertRaises(exception.UnexpectedError, getattr, token_data,
+                          'domain_name')
+        self.assertFalse(token_data.domain_scoped)
+        self.assertEqual(self.v3_sample_token['token']['project']['id'],
+                         token_data.project_id)
+        self.assertEqual(self.v3_sample_token['token']['project']['name'],
+                         token_data.project_name)
+        self.assertTrue(token_data.project_scoped)
+        self.assertTrue(token_data.scoped)
+        self.assertTrue(token_data.trust_scoped)
+        self.assertEqual(
+            [r['id'] for r in self.v3_sample_token['token']['roles']],
+            token_data.role_ids)
+        self.assertEqual(
+            [r['name'] for r in self.v3_sample_token['token']['roles']],
+            token_data.role_names)
+        token_data.pop('project')
+        self.assertFalse(token_data.project_scoped)
+        self.assertFalse(token_data.scoped)
+        self.assertRaises(exception.UnexpectedError, getattr, token_data,
+                          'project_id')
+        self.assertRaises(exception.UnexpectedError, getattr, token_data,
+                          'project_name')
+        self.assertFalse(token_data.project_scoped)
+        domain_id = uuid.uuid4().hex
+        domain_name = uuid.uuid4().hex
+        token_data['domain'] = {'id': domain_id,
+                                'name': domain_name}
+        self.assertEqual(domain_id, token_data.domain_id)
+        self.assertEqual(domain_name, token_data.domain_name)
+        self.assertTrue(token_data.domain_scoped)
+
+        token_data['audit_ids'] = [uuid.uuid4().hex]
+        self.assertEqual(token_data.audit_id,
+                         token_data['audit_ids'][0])
+        self.assertEqual(token_data.audit_chain_id,
+                         token_data['audit_ids'][0])
+        token_data['audit_ids'].append(uuid.uuid4().hex)
+        self.assertEqual(token_data.audit_chain_id,
+                         token_data['audit_ids'][1])
+        del token_data['audit_ids']
+        self.assertIsNone(token_data.audit_id)
+        self.assertIsNone(token_data.audit_chain_id)
+
+    def test_token_model_v3_federated_user(self):
+        token_data = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+                                               token_data=self.v3_sample_token)
+        federation_data = {'identity_provider': {'id': uuid.uuid4().hex},
+                           'protocol': {'id': 'saml2'},
+                           'groups': [{'id': uuid.uuid4().hex}
+                                      for x in range(1, 5)]}
+
+        self.assertFalse(token_data.is_federated_user)
+        self.assertEqual([], token_data.federation_group_ids)
+        self.assertIsNone(token_data.federation_protocol_id)
+        self.assertIsNone(token_data.federation_idp_id)
+
+        token_data['user'][token_model.federation.FEDERATION] = federation_data
+
+        self.assertTrue(token_data.is_federated_user)
+        self.assertEqual([x['id'] for x in federation_data['groups']],
+                         token_data.federation_group_ids)
+        self.assertEqual(federation_data['protocol']['id'],
+                         token_data.federation_protocol_id)
+        self.assertEqual(federation_data['identity_provider']['id'],
+                         token_data.federation_idp_id)
+
+    def test_token_model_v2_federated_user(self):
+        token_data = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+                                               token_data=self.v2_sample_token)
+        federation_data = {'identity_provider': {'id': uuid.uuid4().hex},
+                           'protocol': {'id': 'saml2'},
+                           'groups': [{'id': uuid.uuid4().hex}
+                                      for x in range(1, 5)]}
+        self.assertFalse(token_data.is_federated_user)
+        self.assertEqual([], token_data.federation_group_ids)
+        self.assertIsNone(token_data.federation_protocol_id)
+        self.assertIsNone(token_data.federation_idp_id)
+
+        token_data['user'][token_model.federation.FEDERATION] = federation_data
+
+        # Federated users should not exist in V2; the data should remain
+        # empty.
+        self.assertFalse(token_data.is_federated_user)
+        self.assertEqual([], token_data.federation_group_ids)
+        self.assertIsNone(token_data.federation_protocol_id)
+        self.assertIsNone(token_data.federation_idp_id)
+
+    def test_token_model_v2(self):
+        token_data = token_model.KeystoneToken(uuid.uuid4().hex,
+                                               self.v2_sample_token)
+        self.assertIs(token_model.V2, token_data.version)
+        expires = timeutils.normalize_time(timeutils.parse_isotime(
+            self.v2_sample_token['access']['token']['expires']))
+        issued = timeutils.normalize_time(timeutils.parse_isotime(
+            self.v2_sample_token['access']['token']['issued_at']))
+        self.assertEqual(expires, token_data.expires)
+        self.assertEqual(issued, token_data.issued)
+        self.assertEqual(self.v2_sample_token['access']['user']['id'],
+                         token_data.user_id)
+        self.assertEqual(self.v2_sample_token['access']['user']['name'],
+                         token_data.user_name)
+        self.assertEqual(CONF.identity.default_domain_id,
+                         token_data.user_domain_id)
+        self.assertEqual('Default', token_data.user_domain_name)
+        self.assertEqual(CONF.identity.default_domain_id,
+                         token_data.project_domain_id)
+        self.assertEqual('Default',
+                         token_data.project_domain_name)
+        self.assertEqual(self.v2_sample_token['access']['trust']['id'],
+                         token_data.trust_id)
+        self.assertEqual(
+            self.v2_sample_token['access']['trust']['trustor_user_id'],
+            token_data.trustor_user_id)
+        self.assertEqual(
+            self.v2_sample_token['access']['trust']['impersonation'],
+            token_data.trust_impersonation)
+        self.assertEqual(
+            self.v2_sample_token['access']['trust']['trustee_user_id'],
+            token_data.trustee_user_id)
+        # Project Scoped Token
+        self.assertEqual(
+            self.v2_sample_token['access']['token']['tenant']['id'],
+            token_data.project_id)
+        self.assertEqual(
+            self.v2_sample_token['access']['token']['tenant']['name'],
+            token_data.project_name)
+        self.assertTrue(token_data.project_scoped)
+        self.assertTrue(token_data.scoped)
+        self.assertTrue(token_data.trust_scoped)
+        self.assertEqual(
+            [r['name']
+             for r in self.v2_sample_token['access']['user']['roles']],
+            token_data.role_names)
+        token_data['token'].pop('tenant')
+        self.assertFalse(token_data.scoped)
+        self.assertFalse(token_data.project_scoped)
+        self.assertFalse(token_data.domain_scoped)
+        self.assertRaises(exception.UnexpectedError, getattr, token_data,
+                          'project_id')
+        self.assertRaises(exception.UnexpectedError, getattr, token_data,
+                          'project_name')
+        self.assertRaises(exception.UnexpectedError, getattr, token_data,
+                          'project_domain_id')
+        self.assertRaises(exception.UnexpectedError, getattr, token_data,
+                          'project_domain_name')
+        # No Domain Scoped tokens in V2
+        self.assertRaises(NotImplementedError, getattr, token_data,
+                          'domain_id')
+        self.assertRaises(NotImplementedError, getattr, token_data,
+                          'domain_name')
+        token_data['domain'] = {'id': uuid.uuid4().hex,
+                                'name': uuid.uuid4().hex}
+        self.assertRaises(NotImplementedError, getattr, token_data,
+                          'domain_id')
+        self.assertRaises(NotImplementedError, getattr, token_data,
+                          'domain_name')
+        self.assertFalse(token_data.domain_scoped)
+
+        token_data['token']['audit_ids'] = [uuid.uuid4().hex]
+        self.assertEqual(token_data.audit_chain_id,
+                         token_data['token']['audit_ids'][0])
+        token_data['token']['audit_ids'].append(uuid.uuid4().hex)
+        self.assertEqual(token_data.audit_chain_id,
+                         token_data['token']['audit_ids'][1])
+        self.assertEqual(token_data.audit_id,
+                         token_data['token']['audit_ids'][0])
+        del token_data['token']['audit_ids']
+        self.assertIsNone(token_data.audit_id)
+        self.assertIsNone(token_data.audit_chain_id)
+
+    def test_token_model_unknown(self):
+        self.assertRaises(exception.UnsupportedTokenVersionException,
+                          token_model.KeystoneToken,
+                          token_id=uuid.uuid4().hex,
+                          token_data={'bogus_data': uuid.uuid4().hex})
+
+    def test_token_model_dual_scoped_token(self):
+        domain = {'id': uuid.uuid4().hex,
+                  'name': uuid.uuid4().hex}
+        self.v2_sample_token['access']['domain'] = domain
+        self.v3_sample_token['token']['domain'] = domain
+
+        # V2 Tokens Cannot be domain scoped, this should work
+        token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+                                  token_data=self.v2_sample_token)
+
+        self.assertRaises(exception.UnexpectedError,
+                          token_model.KeystoneToken,
+                          token_id=uuid.uuid4().hex,
+                          token_data=self.v3_sample_token)
diff --git a/keystone-moon/keystone/tests/unit/utils.py b/keystone-moon/keystone/tests/unit/utils.py
new file mode 100644 (file)
index 0000000..17d1de8
--- /dev/null
@@ -0,0 +1,89 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Useful utilities for tests."""
+
+import functools
+import os
+import time
+import uuid
+
+from oslo_log import log
+import six
+from testtools import testcase
+
+
+LOG = log.getLogger(__name__)
+
+TZ = None
+
+
+def timezone(func):
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        tz_original = os.environ.get('TZ')
+        try:
+            if TZ:
+                os.environ['TZ'] = TZ
+                time.tzset()
+            return func(*args, **kwargs)
+        finally:
+            if TZ:
+                if tz_original:
+                    os.environ['TZ'] = tz_original
+                else:
+                    if 'TZ' in os.environ:
+                        del os.environ['TZ']
+                time.tzset()
+    return wrapper
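+
+# A minimal usage sketch (hypothetical test): assign TZ before the
+# decorated test runs, e.g.
+#
+#     utils.TZ = 'UTC-1'
+#
+#     @timezone
+#     def test_parses_local_time(self):
+#         ...  # runs with TZ=UTC-1; the original TZ is restored afterwards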
+
+
+def new_uuid():
+    """Return a string UUID."""
+    return uuid.uuid4().hex
+
+
+def wip(message):
+    """Mark a test as work in progress.
+
+    Based on code by Nat Pryce:
+    https://gist.github.com/npryce/997195#file-wip-py
+
+    The test will always be run. If the test fails then a TestSkipped
+    exception is raised. If the test passes, an AssertionError exception
+    is raised so that the developer knows they made the test pass. This
+    is a reminder to remove the decorator.
+
+    :param message: a string message to help clarify why the test is
+                    marked as a work in progress
+
+    usage:
+      >>> @wip('waiting on bug #000000')
+      ... def test():
+      ...     pass
+
+    """
+
+    def _wip(f):
+        @six.wraps(f)
+        def run_test(*args, **kwargs):
+            try:
+                f(*args, **kwargs)
+            except Exception:
+                raise testcase.TestSkipped('work in progress test failed: ' +
+                                           message)
+
+            raise AssertionError('work in progress test passed: ' + message)
+
+        return run_test
+
+    return _wip
diff --git a/keystone-moon/keystone/token/__init__.py b/keystone-moon/keystone/token/__init__.py
new file mode 100644 (file)
index 0000000..a73e19f
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.token import controllers  # noqa
+from keystone.token import persistence  # noqa
+from keystone.token import provider  # noqa
+from keystone.token import routers  # noqa
diff --git a/keystone-moon/keystone/token/controllers.py b/keystone-moon/keystone/token/controllers.py
new file mode 100644 (file)
index 0000000..3304acb
--- /dev/null
@@ -0,0 +1,523 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import datetime
+import sys
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+import six
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import token_model
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class ExternalAuthNotApplicable(Exception):
+    """External authentication is not applicable."""
+    pass
+
+
+@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
+                     'resource_api', 'role_api', 'token_provider_api',
+                     'trust_api')
+class Auth(controller.V2Controller):
+
+    @controller.v2_deprecated
+    def ca_cert(self, context, auth=None):
+        with open(CONF.signing.ca_certs, 'r') as ca_file:
+            data = ca_file.read()
+        return data
+
+    @controller.v2_deprecated
+    def signing_cert(self, context, auth=None):
+        with open(CONF.signing.certfile, 'r') as cert_file:
+            data = cert_file.read()
+        return data
+
+    @controller.v2_deprecated
+    def authenticate(self, context, auth=None):
+        """Authenticate credentials and return a token.
+
+        Accept auth as a dict that looks like::
+
+            {
+                "auth":{
+                    "passwordCredentials":{
+                        "username":"test_user",
+                        "password":"mypass"
+                    },
+                    "tenantName":"customer-x"
+                }
+            }
+
+        In this case, tenant is optional; if it is not provided, the token
+        will be considered "unscoped" and can later be used to get a scoped
+        token.
+
+        Alternatively, this call accepts auth containing only a token and a
+        tenant, and returns a new token that is scoped to that tenant.
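+
+        Such a token-scoped request looks like, for example::
+
+            {
+                "auth":{
+                    "token":{
+                        "id":"<existing-token-id>"
+                    },
+                    "tenantName":"customer-x"
+                }
+            }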
+        """
+
+        if auth is None:
+            raise exception.ValidationError(attribute='auth',
+                                            target='request body')
+
+        if "token" in auth:
+            # Try to authenticate using a token
+            auth_info = self._authenticate_token(
+                context, auth)
+        else:
+            # Try external authentication
+            try:
+                auth_info = self._authenticate_external(
+                    context, auth)
+            except ExternalAuthNotApplicable:
+                # Try local authentication
+                auth_info = self._authenticate_local(
+                    context, auth)
+
+        user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id = auth_info
+        # Validate that the auth info is valid and nothing is disabled
+        try:
+            self.identity_api.assert_user_enabled(
+                user_id=user_ref['id'], user=user_ref)
+            if tenant_ref:
+                self.resource_api.assert_project_enabled(
+                    project_id=tenant_ref['id'], project=tenant_ref)
+        except AssertionError as e:
+            six.reraise(exception.Unauthorized, exception.Unauthorized(e),
+                        sys.exc_info()[2])
+        # NOTE(morganfainberg): Make sure the data is in the correct form
+        # since it might be consumed externally to Keystone and this is a
+        # v2.0 controller. The user_ref is encoded into the auth_token_data
+        # which is returned as part of the token data. The token provider
+        # doesn't care about the format.
+        user_ref = self.v3_to_v2_user(user_ref)
+        if tenant_ref:
+            tenant_ref = self.filter_domain_id(tenant_ref)
+        auth_token_data = self._get_auth_token_data(user_ref,
+                                                    tenant_ref,
+                                                    metadata_ref,
+                                                    expiry,
+                                                    audit_id)
+
+        if tenant_ref:
+            catalog_ref = self.catalog_api.get_catalog(
+                user_ref['id'], tenant_ref['id'])
+        else:
+            catalog_ref = {}
+
+        auth_token_data['id'] = 'placeholder'
+        if bind:
+            auth_token_data['bind'] = bind
+
+        roles_ref = []
+        for role_id in metadata_ref.get('roles', []):
+            role_ref = self.role_api.get_role(role_id)
+            roles_ref.append(dict(name=role_ref['name']))
+
+        (token_id, token_data) = self.token_provider_api.issue_v2_token(
+            auth_token_data, roles_ref=roles_ref, catalog_ref=catalog_ref)
+
+        # NOTE(wanghong): We consume a trust use only when we are using trusts
+        # and have successfully issued a token.
+        if CONF.trust.enabled and 'trust_id' in auth:
+            self.trust_api.consume_use(auth['trust_id'])
+
+        return token_data
+
+    def _restrict_scope(self, token_model_ref):
+        # A trust token cannot be used to get another token
+        if token_model_ref.trust_scoped:
+            raise exception.Forbidden()
+        if not CONF.token.allow_rescope_scoped_token:
+            # Do not allow conversion from scoped tokens.
+            if token_model_ref.project_scoped or token_model_ref.domain_scoped:
+                raise exception.Forbidden(action=_("rescope a scoped token"))
+
+    def _authenticate_token(self, context, auth):
+        """Try to authenticate using an already existing token.
+
+        Returns (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id).
+        """
+        if 'token' not in auth:
+            raise exception.ValidationError(
+                attribute='token', target='auth')
+
+        if "id" not in auth['token']:
+            raise exception.ValidationError(
+                attribute="id", target="token")
+
+        old_token = auth['token']['id']
+        if len(old_token) > CONF.max_token_size:
+            raise exception.ValidationSizeError(attribute='token',
+                                                size=CONF.max_token_size)
+
+        try:
+            token_model_ref = token_model.KeystoneToken(
+                token_id=old_token,
+                token_data=self.token_provider_api.validate_token(old_token))
+        except exception.NotFound as e:
+            raise exception.Unauthorized(e)
+
+        wsgi.validate_token_bind(context, token_model_ref)
+
+        self._restrict_scope(token_model_ref)
+        user_id = token_model_ref.user_id
+        tenant_id = self._get_project_id_from_auth(auth)
+
+        if not CONF.trust.enabled and 'trust_id' in auth:
+            raise exception.Forbidden('Trusts are disabled.')
+        elif CONF.trust.enabled and 'trust_id' in auth:
+            trust_ref = self.trust_api.get_trust(auth['trust_id'])
+            if trust_ref is None:
+                raise exception.Forbidden()
+            if user_id != trust_ref['trustee_user_id']:
+                raise exception.Forbidden()
+            if (trust_ref['project_id'] and
+                    tenant_id != trust_ref['project_id']):
+                raise exception.Forbidden()
+            if ('expires' in trust_ref) and (trust_ref['expires']):
+                expiry = trust_ref['expires']
+                if expiry < timeutils.parse_isotime(timeutils.isotime()):
+                    raise exception.Forbidden()
+            user_id = trust_ref['trustor_user_id']
+            trustor_user_ref = self.identity_api.get_user(
+                trust_ref['trustor_user_id'])
+            if not trustor_user_ref['enabled']:
+                raise exception.Forbidden()
+            trustee_user_ref = self.identity_api.get_user(
+                trust_ref['trustee_user_id'])
+            if not trustee_user_ref['enabled']:
+                raise exception.Forbidden()
+
+            if trust_ref['impersonation'] is True:
+                current_user_ref = trustor_user_ref
+            else:
+                current_user_ref = trustee_user_ref
+
+        else:
+            current_user_ref = self.identity_api.get_user(user_id)
+
+        metadata_ref = {}
+        tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
+            user_id, tenant_id)
+
+        expiry = token_model_ref.expires
+        if CONF.trust.enabled and 'trust_id' in auth:
+            trust_id = auth['trust_id']
+            trust_roles = []
+            for role in trust_ref['roles']:
+                if 'roles' not in metadata_ref:
+                    raise exception.Forbidden()
+                if role['id'] in metadata_ref['roles']:
+                    trust_roles.append(role['id'])
+                else:
+                    raise exception.Forbidden()
+            if 'expiry' in trust_ref and trust_ref['expiry']:
+                trust_expiry = timeutils.parse_isotime(trust_ref['expiry'])
+                if trust_expiry < expiry:
+                    expiry = trust_expiry
+            metadata_ref['roles'] = trust_roles
+            metadata_ref['trustee_user_id'] = trust_ref['trustee_user_id']
+            metadata_ref['trust_id'] = trust_id
+
+        bind = token_model_ref.bind
+        audit_id = token_model_ref.audit_chain_id
+
+        return (current_user_ref, tenant_ref, metadata_ref, expiry, bind,
+                audit_id)
+
+    def _authenticate_local(self, context, auth):
+        """Try to authenticate against the identity backend.
+
+        Returns (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id).
+        """
+        if 'passwordCredentials' not in auth:
+            raise exception.ValidationError(
+                attribute='passwordCredentials', target='auth')
+
+        if "password" not in auth['passwordCredentials']:
+            raise exception.ValidationError(
+                attribute='password', target='passwordCredentials')
+
+        password = auth['passwordCredentials']['password']
+        if password and len(password) > CONF.identity.max_password_length:
+            raise exception.ValidationSizeError(
+                attribute='password', size=CONF.identity.max_password_length)
+
+        if (not auth['passwordCredentials'].get("userId") and
+                not auth['passwordCredentials'].get("username")):
+            raise exception.ValidationError(
+                attribute='username or userId',
+                target='passwordCredentials')
+
+        user_id = auth['passwordCredentials'].get('userId')
+        if user_id and len(user_id) > CONF.max_param_size:
+            raise exception.ValidationSizeError(attribute='userId',
+                                                size=CONF.max_param_size)
+
+        username = auth['passwordCredentials'].get('username', '')
+
+        if username:
+            if len(username) > CONF.max_param_size:
+                raise exception.ValidationSizeError(attribute='username',
+                                                    size=CONF.max_param_size)
+            try:
+                user_ref = self.identity_api.get_user_by_name(
+                    username, CONF.identity.default_domain_id)
+                user_id = user_ref['id']
+            except exception.UserNotFound as e:
+                raise exception.Unauthorized(e)
+
+        try:
+            user_ref = self.identity_api.authenticate(
+                context,
+                user_id=user_id,
+                password=password)
+        except AssertionError as e:
+            raise exception.Unauthorized(e.args[0])
+
+        metadata_ref = {}
+        tenant_id = self._get_project_id_from_auth(auth)
+        tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
+            user_id, tenant_id)
+
+        expiry = provider.default_expire_time()
+        bind = None
+        audit_id = None
+        return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id)
+
+    def _authenticate_external(self, context, auth):
+        """Try to authenticate an external user via REMOTE_USER variable.
+
+        Returns (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id).
+        """
+        environment = context.get('environment', {})
+        if not environment.get('REMOTE_USER'):
+            raise ExternalAuthNotApplicable()
+
+        username = environment['REMOTE_USER']
+        try:
+            user_ref = self.identity_api.get_user_by_name(
+                username, CONF.identity.default_domain_id)
+            user_id = user_ref['id']
+        except exception.UserNotFound as e:
+            raise exception.Unauthorized(e)
+
+        metadata_ref = {}
+        tenant_id = self._get_project_id_from_auth(auth)
+        tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
+            user_id, tenant_id)
+
+        expiry = provider.default_expire_time()
+        bind = None
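+        # Only record a kerberos bind when the deployment lists 'kerberos'
+        # in CONF.token.bind and the request actually arrived via Negotiate
+        # (SPNEGO) authentication.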
+        if ('kerberos' in CONF.token.bind and
+                environment.get('AUTH_TYPE', '').lower() == 'negotiate'):
+            bind = {'kerberos': username}
+        audit_id = None
+
+        return (user_ref, tenant_ref, metadata_ref, expiry, bind, audit_id)
+
+    def _get_auth_token_data(self, user, tenant, metadata, expiry, audit_id):
+        return dict(user=user,
+                    tenant=tenant,
+                    metadata=metadata,
+                    expires=expiry,
+                    parent_audit_id=audit_id)
+
+    def _get_project_id_from_auth(self, auth):
+        """Extract tenant information from auth dict.
+
+        Returns a valid tenant_id if it exists, or None if not specified.
+        """
+        tenant_id = auth.get('tenantId')
+        if tenant_id and len(tenant_id) > CONF.max_param_size:
+            raise exception.ValidationSizeError(attribute='tenantId',
+                                                size=CONF.max_param_size)
+
+        tenant_name = auth.get('tenantName')
+        if tenant_name and len(tenant_name) > CONF.max_param_size:
+            raise exception.ValidationSizeError(attribute='tenantName',
+                                                size=CONF.max_param_size)
+
+        if tenant_name:
+            try:
+                tenant_ref = self.resource_api.get_project_by_name(
+                    tenant_name, CONF.identity.default_domain_id)
+                tenant_id = tenant_ref['id']
+            except exception.ProjectNotFound as e:
+                raise exception.Unauthorized(e)
+        return tenant_id
+
+    def _get_project_roles_and_ref(self, user_id, tenant_id):
+        """Returns the project roles for this user, and the project ref."""
+
+        tenant_ref = None
+        role_list = []
+        if tenant_id:
+            try:
+                tenant_ref = self.resource_api.get_project(tenant_id)
+                role_list = self.assignment_api.get_roles_for_user_and_project(
+                    user_id, tenant_id)
+            except exception.ProjectNotFound:
+                pass
+
+            if not role_list:
+                msg = _('User %(u_id)s is unauthorized for tenant %(t_id)s')
+                msg = msg % {'u_id': user_id, 't_id': tenant_id}
+                LOG.warning(msg)
+                raise exception.Unauthorized(msg)
+
+        return (tenant_ref, role_list)
+
+    def _get_token_ref(self, token_id, belongs_to=None):
+        """Returns a token if a valid one exists.
+
+        Optionally, limited to a token owned by a specific tenant.
+
+        """
+        token_ref = token_model.KeystoneToken(
+            token_id=token_id,
+            token_data=self.token_provider_api.validate_token(token_id))
+        if belongs_to:
+            if not token_ref.project_scoped:
+                raise exception.Unauthorized(
+                    _('Token does not belong to specified tenant.'))
+            if token_ref.project_id != belongs_to:
+                raise exception.Unauthorized(
+                    _('Token does not belong to specified tenant.'))
+        return token_ref
+
+    @controller.v2_deprecated
+    @controller.protected()
+    def validate_token_head(self, context, token_id):
+        """Check that a token is valid.
+
+        Optionally, also ensure that it is owned by a specific tenant.
+
+        Identical to ``validate_token``, except does not return a response.
+
+        The code in ``keystone.common.wsgi.render_response`` will remove
+        the content body.
+
+        """
+        belongs_to = context['query_string'].get('belongsTo')
+        return self.token_provider_api.validate_v2_token(token_id, belongs_to)
+
+    @controller.v2_deprecated
+    @controller.protected()
+    def validate_token(self, context, token_id):
+        """Check that a token is valid.
+
+        Optionally, also ensure that it is owned by a specific tenant.
+
+        Returns metadata about the token along with any associated roles.
+
+        """
+        belongs_to = context['query_string'].get('belongsTo')
+        # TODO(ayoung) validate against revocation API
+        return self.token_provider_api.validate_v2_token(token_id, belongs_to)
+
+    @controller.v2_deprecated
+    def delete_token(self, context, token_id):
+        """Delete a token, effectively invalidating it for authz."""
+        # TODO(termie): this stuff should probably be moved to middleware
+        self.assert_admin(context)
+        self.token_provider_api.revoke_token(token_id)
+
+    @controller.v2_deprecated
+    @controller.protected()
+    def revocation_list(self, context, auth=None):
+        if not CONF.token.revoke_by_id:
+            raise exception.Gone()
+        tokens = self.token_provider_api.list_revoked_tokens()
+
+        for t in tokens:
+            expires = t['expires']
+            if expires and isinstance(expires, datetime.datetime):
+                t['expires'] = timeutils.isotime(expires)
+        data = {'revoked': tokens}
+        json_data = jsonutils.dumps(data)
+        signed_text = cms.cms_sign_text(json_data,
+                                        CONF.signing.certfile,
+                                        CONF.signing.keyfile)
+
+        return {'signed': signed_text}
+
+    @controller.v2_deprecated
+    def endpoints(self, context, token_id):
+        """Return a list of endpoints available to the token."""
+        self.assert_admin(context)
+
+        token_ref = self._get_token_ref(token_id)
+
+        catalog_ref = None
+        if token_ref.project_id:
+            catalog_ref = self.catalog_api.get_catalog(
+                token_ref.user_id,
+                token_ref.project_id)
+
+        return Auth.format_endpoint_list(catalog_ref)
+
+    @classmethod
+    def format_endpoint_list(cls, catalog_ref):
+        """Formats a list of endpoints according to Identity API v2.
+
+        The v2.0 API wants an endpoint list to look like::
+
+            {
+                'endpoints': [
+                    {
+                        'id': $endpoint_id,
+                        'name': $SERVICE[name],
+                        'type': $SERVICE,
+                        'tenantId': $tenant_id,
+                        'region': $REGION,
+                    }
+                ],
+                'endpoints_links': [],
+            }
+
+        """
+        if not catalog_ref:
+            return {}
+
+        endpoints = []
+        for region_name, region_ref in six.iteritems(catalog_ref):
+            for service_type, service_ref in six.iteritems(region_ref):
+                endpoints.append({
+                    'id': service_ref.get('id'),
+                    'name': service_ref.get('name'),
+                    'type': service_type,
+                    'region': region_name,
+                    'publicURL': service_ref.get('publicURL'),
+                    'internalURL': service_ref.get('internalURL'),
+                    'adminURL': service_ref.get('adminURL'),
+                })
+
+        return {'endpoints': endpoints, 'endpoints_links': []}
diff --git a/keystone-moon/keystone/token/persistence/__init__.py b/keystone-moon/keystone/token/persistence/__init__.py
new file mode 100644 (file)
index 0000000..29ad565
--- /dev/null
@@ -0,0 +1,16 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.token.persistence.core import *  # noqa
+
+
+__all__ = ['Manager', 'Driver', 'backends']
diff --git a/keystone-moon/keystone/token/persistence/backends/__init__.py b/keystone-moon/keystone/token/persistence/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/token/persistence/backends/kvs.py b/keystone-moon/keystone/token/persistence/backends/kvs.py
new file mode 100644 (file)
index 0000000..b4807bf
--- /dev/null
@@ -0,0 +1,357 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+import copy
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import kvs
+from keystone import exception
+from keystone.i18n import _, _LE, _LW
+from keystone import token
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class Token(token.persistence.Driver):
+    """KeyValueStore backend for tokens.
+
+    This is the base implementation for any/all key-value-stores (e.g.
+    memcached) for the Token backend.  It is recommended to only use the base
+    in-memory implementation for testing purposes.
+    """
+
+    revocation_key = 'revocation-list'
+    kvs_backend = 'openstack.kvs.Memory'
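+    # Subclasses (or callers passing backing_store) are expected to select
+    # a persistent store; the default 'openstack.kvs.Memory' keeps tokens
+    # in-process only, which is why __init__ below warns when this base
+    # class is instantiated directly.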
+
+    def __init__(self, backing_store=None, **kwargs):
+        super(Token, self).__init__()
+        self._store = kvs.get_key_value_store('token-driver')
+        if backing_store is not None:
+            self.kvs_backend = backing_store
+        if not self._store.is_configured:
+            # Do not re-configure the backend if the store has been initialized
+            self._store.configure(backing_store=self.kvs_backend, **kwargs)
+        if self.__class__ == Token:
+            # NOTE(morganfainberg): Only warn if the base KVS implementation
+            # is instantiated.
+            LOG.warn(_LW('It is recommended to only use the base '
+                         'key-value-store implementation for the token driver '
+                         'for testing purposes. Please use '
+                         'keystone.token.persistence.backends.memcache.Token '
+                         'or keystone.token.persistence.backends.sql.Token '
+                         'instead.'))
+
+    def _prefix_token_id(self, token_id):
+        return 'token-%s' % token_id.encode('utf-8')
+
+    def _prefix_user_id(self, user_id):
+        return 'usertokens-%s' % user_id.encode('utf-8')
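+        # Together these yield keys like 'token-<token_id>' and
+        # 'usertokens-<user_id>', keeping token documents and per-user
+        # index lists in distinct namespaces within the same store.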
+
+    def _get_key_or_default(self, key, default=None):
+        try:
+            return self._store.get(key)
+        except exception.NotFound:
+            return default
+
+    def _get_key(self, key):
+        return self._store.get(key)
+
+    def _set_key(self, key, value, lock=None):
+        self._store.set(key, value, lock)
+
+    def _delete_key(self, key):
+        return self._store.delete(key)
+
+    def get_token(self, token_id):
+        ptk = self._prefix_token_id(token_id)
+        try:
+            token_ref = self._get_key(ptk)
+        except exception.NotFound:
+            raise exception.TokenNotFound(token_id=token_id)
+
+        return token_ref
+
+    def create_token(self, token_id, data):
+        """Create a token by id and data.
+
+        It is assumed the caller has performed data validation on the "data"
+        parameter.
+        """
+        data_copy = copy.deepcopy(data)
+        ptk = self._prefix_token_id(token_id)
+        if not data_copy.get('expires'):
+            data_copy['expires'] = provider.default_expire_time()
+        if not data_copy.get('user_id'):
+            data_copy['user_id'] = data_copy['user']['id']
+
+        # NOTE(morganfainberg): for ease of manipulating the data without
+        # concern about the backend, always store the value(s) in the
+        # index as the isotime (string) version so this is where the string is
+        # built.
+        expires_str = timeutils.isotime(data_copy['expires'], subsecond=True)
+
+        self._set_key(ptk, data_copy)
+        user_id = data['user']['id']
+        user_key = self._prefix_user_id(user_id)
+        self._update_user_token_list(user_key, token_id, expires_str)
+        if CONF.trust.enabled and data.get('trust_id'):
+            # NOTE(morganfainberg): If trusts are enabled and this is a trust
+            # scoped token, we add the token to the trustee list as well.  This
+            # allows password changes of the trustee to also expire the token.
+            # There is no harm in placing the token in multiple lists, as
+            # _list_tokens is smart enough to handle almost any case of
+            # valid/invalid/expired for a given token.
+            token_data = data_copy['token_data']
+            if data_copy['token_version'] == token.provider.V2:
+                trustee_user_id = token_data['access']['trust'][
+                    'trustee_user_id']
+            elif data_copy['token_version'] == token.provider.V3:
+                trustee_user_id = token_data['OS-TRUST:trust'][
+                    'trustee_user_id']
+            else:
+                raise exception.UnsupportedTokenVersionException(
+                    _('Unknown token version %s') %
+                    data_copy.get('token_version'))
+
+            trustee_key = self._prefix_user_id(trustee_user_id)
+            self._update_user_token_list(trustee_key, token_id, expires_str)
+
+        return data_copy
+
+    def _get_user_token_list_with_expiry(self, user_key):
+        """Return a list of tuples in the format (token_id, token_expiry) for
+        the user_key.
+        """
+        return self._get_key_or_default(user_key, default=[])
+
+    def _get_user_token_list(self, user_key):
+        """Return a list of token_ids for the user_key."""
+        token_list = self._get_user_token_list_with_expiry(user_key)
+        # Each element is a tuple of (token_id, token_expiry). Most code does
+        # not care about the expiry; it is stripped out and only a list of
+        # token_ids is returned.
+        return [t[0] for t in token_list]
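+
+    # Sketch of the per-user index this driver maintains (values are
+    # illustrative): 'usertokens-<user_id>' maps to a list such as
+    # [('<token_id>', '2015-06-30T16:47:29.000000Z'), ...], where the second
+    # element is the isotime-formatted expiry written by
+    # _update_user_token_list().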
+
+    def _update_user_token_list(self, user_key, token_id, expires_isotime_str):
+        current_time = self._get_current_time()
+        revoked_token_list = set([t['id'] for t in
+                                  self.list_revoked_tokens()])
+
+        with self._store.get_lock(user_key) as lock:
+            filtered_list = []
+            token_list = self._get_user_token_list_with_expiry(user_key)
+            for item in token_list:
+                try:
+                    item_id, expires = self._format_token_index_item(item)
+                except (ValueError, TypeError):
+                    # NOTE(morganfainberg): Skip on expected errors
+                    # possibilities from the `_format_token_index_item` method.
+                    continue
+
+                if expires < current_time:
+                    LOG.debug(('Token `%(token_id)s` is expired, removing '
+                               'from `%(user_key)s`.'),
+                              {'token_id': item_id, 'user_key': user_key})
+                    continue
+
+                if item_id in revoked_token_list:
+                    # NOTE(morganfainberg): If the token has been revoked, it
+                    # can safely be removed from this list.  This helps to keep
+                    # the user_token_list as reasonably small as possible.
+                    LOG.debug(('Token `%(token_id)s` is revoked, removing '
+                               'from `%(user_key)s`.'),
+                              {'token_id': item_id, 'user_key': user_key})
+                    continue
+                filtered_list.append(item)
+            filtered_list.append((token_id, expires_isotime_str))
+            self._set_key(user_key, filtered_list, lock)
+            return filtered_list
+
+    def _get_current_time(self):
+        return timeutils.normalize_time(timeutils.utcnow())
+
+    def _add_to_revocation_list(self, data, lock):
+        filtered_list = []
+        revoked_token_data = {}
+
+        current_time = self._get_current_time()
+        expires = data['expires']
+
+        if isinstance(expires, six.string_types):
+            expires = timeutils.parse_isotime(expires)
+
+        expires = timeutils.normalize_time(expires)
+
+        if expires < current_time:
+            LOG.warning(_LW('Token `%s` is expired, not adding to the '
+                            'revocation list.'), data['id'])
+            return
+
+        revoked_token_data['expires'] = timeutils.isotime(expires,
+                                                          subsecond=True)
+        revoked_token_data['id'] = data['id']
+
+        token_list = self._get_key_or_default(self.revocation_key, default=[])
+        if not isinstance(token_list, list):
+            # NOTE(morganfainberg): In the case that the revocation list is not
+            # in a format we understand, reinitialize it. This is an attempt to
+            # not allow the revocation list to be completely broken if
+            # somehow the key is changed outside of keystone (e.g. memcache
+            # that is shared by multiple applications). Logging occurs at error
+            # level so that the cloud administrators have some awareness that
+            # the revocation_list needed to be cleared out. In all, this should
+            # be recoverable. Keystone cannot prevent external applications
+            # from changing a key in some backends; however, it is possible to
+            # gracefully handle and notify of this event.
+            LOG.error(_LE('Reinitializing revocation list due to error '
+                          'in loading revocation list from backend.  '
+                          'Expected `list` type got `%(type)s`. Old '
+                          'revocation list data: %(list)r'),
+                      {'type': type(token_list), 'list': token_list})
+            token_list = []
+
+        # NOTE(morganfainberg): on revocation, cleanup the expired entries, try
+        # to keep the list of tokens revoked at the minimum.
+        for token_data in token_list:
+            try:
+                expires_at = timeutils.normalize_time(
+                    timeutils.parse_isotime(token_data['expires']))
+            except ValueError:
+                LOG.warning(_LW('Removing `%s` from revocation list due to '
+                                'invalid expires data in revocation list.'),
+                            token_data.get('id', 'INVALID_TOKEN_DATA'))
+                continue
+            if expires_at > current_time:
+                filtered_list.append(token_data)
+        filtered_list.append(revoked_token_data)
+        self._set_key(self.revocation_key, filtered_list, lock)
+
+    def delete_token(self, token_id):
+        # Test for existence
+        with self._store.get_lock(self.revocation_key) as lock:
+            data = self.get_token(token_id)
+            ptk = self._prefix_token_id(token_id)
+            result = self._delete_key(ptk)
+            self._add_to_revocation_list(data, lock)
+        return result
+
+    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
+                      consumer_id=None):
+        return super(Token, self).delete_tokens(
+            user_id=user_id,
+            tenant_id=tenant_id,
+            trust_id=trust_id,
+            consumer_id=consumer_id,
+        )
+
+    def _format_token_index_item(self, item):
+        try:
+            token_id, expires = item
+        except (TypeError, ValueError):
+            LOG.debug(('Invalid token entry expected tuple of '
+                       '`(<token_id>, <expires>)` got: `%(item)r`'),
+                      dict(item=item))
+            raise
+
+        try:
+            expires = timeutils.normalize_time(
+                timeutils.parse_isotime(expires))
+        except ValueError:
+            LOG.debug(('Invalid expires time on token `%(token_id)s`:'
+                       ' %(expires)r'),
+                      dict(token_id=token_id, expires=expires))
+            raise
+        return token_id, expires
+
+    def _token_match_tenant(self, token_ref, tenant_id):
+        if token_ref.get('tenant'):
+            return token_ref['tenant'].get('id') == tenant_id
+        return False
+
+    def _token_match_trust(self, token_ref, trust_id):
+        if not token_ref.get('trust_id'):
+            return False
+        return token_ref['trust_id'] == trust_id
+
+    def _token_match_consumer(self, token_ref, consumer_id):
+        try:
+            oauth = token_ref['token_data']['token']['OS-OAUTH1']
+            return oauth.get('consumer_id') == consumer_id
+        except KeyError:
+            return False
+
+    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
+                     consumer_id=None):
+        # This function is used to generate the list of tokens that should be
+        # revoked when revoking by token identifiers.  This approach will be
+        # deprecated soon, probably in the Juno release.  Setting revoke_by_id
+        # to False indicates that this kind of recording should not be
+        # performed.  In order to test the revocation events, tokens shouldn't
+        # be deleted from the backends.  This check ensures that tokens are
+        # still recorded.
+        if not CONF.token.revoke_by_id:
+            return []
+        tokens = []
+        user_key = self._prefix_user_id(user_id)
+        token_list = self._get_user_token_list_with_expiry(user_key)
+        current_time = self._get_current_time()
+        for item in token_list:
+            try:
+                token_id, expires = self._format_token_index_item(item)
+            except (TypeError, ValueError):
+                # NOTE(morganfainberg): Skip on expected error possibilities
+                # from the `_format_token_index_item` method.
+                continue
+
+            if expires < current_time:
+                continue
+
+            try:
+                token_ref = self.get_token(token_id)
+            except exception.TokenNotFound:
+                # NOTE(morganfainberg): Token doesn't exist, skip it.
+                continue
+            if token_ref:
+                if tenant_id is not None:
+                    if not self._token_match_tenant(token_ref, tenant_id):
+                        continue
+                if trust_id is not None:
+                    if not self._token_match_trust(token_ref, trust_id):
+                        continue
+                if consumer_id is not None:
+                    if not self._token_match_consumer(token_ref, consumer_id):
+                        continue
+
+                tokens.append(token_id)
+        return tokens
+
+    def list_revoked_tokens(self):
+        revoked_token_list = self._get_key_or_default(self.revocation_key,
+                                                      default=[])
+        if isinstance(revoked_token_list, list):
+            return revoked_token_list
+        return []
+
+    def flush_expired_tokens(self):
+        """Archive or delete tokens that have expired."""
+        raise exception.NotImplemented()
diff --git a/keystone-moon/keystone/token/persistence/backends/memcache.py b/keystone-moon/keystone/token/persistence/backends/memcache.py
new file mode 100644 (file)
index 0000000..03f27ea
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright 2013 Metacloud, Inc.
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from keystone.token.persistence.backends import kvs
+
+
+CONF = cfg.CONF
+
+
+class Token(kvs.Token):
+    kvs_backend = 'openstack.kvs.Memcached'
+    memcached_backend = 'memcached'
+
+    def __init__(self, *args, **kwargs):
+        kwargs['memcached_backend'] = self.memcached_backend
+        kwargs['no_expiry_keys'] = [self.revocation_key]
+        kwargs['memcached_expire_time'] = CONF.token.expiration
+        kwargs['url'] = CONF.memcache.servers
+        super(Token, self).__init__(*args, **kwargs)
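+
+# A minimal keystone.conf sketch for selecting this backend (values are
+# illustrative):
+#   [token]
+#   driver = keystone.token.persistence.backends.memcache.Token
+#   [memcache]
+#   servers = localhost:11211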
diff --git a/keystone-moon/keystone/token/persistence/backends/memcache_pool.py b/keystone-moon/keystone/token/persistence/backends/memcache_pool.py
new file mode 100644 (file)
index 0000000..55f9e8a
--- /dev/null
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from keystone.token.persistence.backends import memcache
+
+
+CONF = cfg.CONF
+
+
+class Token(memcache.Token):
+    memcached_backend = 'pooled_memcached'
+
+    def __init__(self, *args, **kwargs):
+        for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
+                    'pool_unused_timeout', 'pool_connection_get_timeout'):
+            kwargs[arg] = getattr(CONF.memcache, arg)
+        super(Token, self).__init__(*args, **kwargs)
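+
+# The pooled backend reads its tuning options from the [memcache] section;
+# illustrative values: dead_retry = 300, socket_timeout = 3, pool_maxsize = 10,
+# pool_unused_timeout = 60, pool_connection_get_timeout = 10.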
diff --git a/keystone-moon/keystone/token/persistence/backends/sql.py b/keystone-moon/keystone/token/persistence/backends/sql.py
new file mode 100644 (file)
index 0000000..fc70fb9
--- /dev/null
@@ -0,0 +1,279 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import functools
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _LI
+from keystone import token
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class TokenModel(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'token'
+    attributes = ['id', 'expires', 'user_id', 'trust_id']
+    id = sql.Column(sql.String(64), primary_key=True)
+    expires = sql.Column(sql.DateTime(), default=None)
+    extra = sql.Column(sql.JsonBlob())
+    valid = sql.Column(sql.Boolean(), default=True, nullable=False)
+    user_id = sql.Column(sql.String(64))
+    trust_id = sql.Column(sql.String(64))
+    __table_args__ = (
+        sql.Index('ix_token_expires', 'expires'),
+        sql.Index('ix_token_expires_valid', 'expires', 'valid'),
+        sql.Index('ix_token_user_id', 'user_id'),
+        sql.Index('ix_token_trust_id', 'trust_id')
+    )
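+
+    # Roughly equivalent DDL (a sketch; exact column types vary by dialect):
+    #   CREATE TABLE token (
+    #       id VARCHAR(64) PRIMARY KEY, expires DATETIME, extra TEXT,
+    #       valid BOOLEAN NOT NULL, user_id VARCHAR(64), trust_id VARCHAR(64));
+    # plus indexes on expires, (expires, valid), user_id and trust_id.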
+
+
+def _expiry_range_batched(session, upper_bound_func, batch_size):
+    """Returns the stop point of the next batch for expiration.
+
+    Return the timestamp of the next token that is `batch_size` rows from
+    being the oldest expired token.
+    """
+
+    # This expiry strategy splits the tokens into roughly equal sized batches
+    # to be deleted.  It does this by finding the timestamp of a token
+    # `batch_size` rows from the oldest token and yielding that to the caller.
+    # It's expected that the caller will then delete all rows with a timestamp
+    # equal to or older than the one yielded.  This may delete slightly more
+    # tokens than the batch_size, but that should be ok in almost all cases.
+    LOG.debug('Token expiration batch size: %d', batch_size)
+    query = session.query(TokenModel.expires)
+    query = query.filter(TokenModel.expires < upper_bound_func())
+    query = query.order_by(TokenModel.expires)
+    query = query.offset(batch_size - 1)
+    query = query.limit(1)
+    while True:
+        try:
+            next_expiration = query.one()[0]
+        except sql.NotFound:
+            # There are fewer than `batch_size` rows remaining, so fall
+            # through to the normal delete.
+            break
+        yield next_expiration
+    yield upper_bound_func()
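+
+# Illustrative use (mirrors flush_expired_tokens() below): iterate the yielded
+# timestamps and delete every row expiring at or before each one, e.g.
+#   for upper in _expiry_range_batched(session, timeutils.utcnow,
+#                                      batch_size=100):
+#       query.filter(TokenModel.expires <= upper).delete(
+#           synchronize_session=False)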
+
+
+def _expiry_range_all(session, upper_bound_func):
+    """Expires all tokens in one pass."""
+
+    yield upper_bound_func()
+
+
+class Token(token.persistence.Driver):
+    # Public interface
+    def get_token(self, token_id):
+        if token_id is None:
+            raise exception.TokenNotFound(token_id=token_id)
+        session = sql.get_session()
+        token_ref = session.query(TokenModel).get(token_id)
+        if not token_ref or not token_ref.valid:
+            raise exception.TokenNotFound(token_id=token_id)
+        return token_ref.to_dict()
+
+    def create_token(self, token_id, data):
+        data_copy = copy.deepcopy(data)
+        if not data_copy.get('expires'):
+            data_copy['expires'] = provider.default_expire_time()
+        if not data_copy.get('user_id'):
+            data_copy['user_id'] = data_copy['user']['id']
+
+        token_ref = TokenModel.from_dict(data_copy)
+        token_ref.valid = True
+        session = sql.get_session()
+        with session.begin():
+            session.add(token_ref)
+        return token_ref.to_dict()
+
+    def delete_token(self, token_id):
+        session = sql.get_session()
+        with session.begin():
+            token_ref = session.query(TokenModel).get(token_id)
+            if not token_ref or not token_ref.valid:
+                raise exception.TokenNotFound(token_id=token_id)
+            token_ref.valid = False
+
+    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
+                      consumer_id=None):
+        """Deletes all tokens in one session
+
+        The user_id will be ignored if the trust_id is specified. user_id
+        will always be specified.
+        If using a trust, the token's user_id is set to the trustee's user ID
+        or the trustor's user ID, so will use trust_id to query the tokens.
+
+        """
+        session = sql.get_session()
+        with session.begin():
+            now = timeutils.utcnow()
+            query = session.query(TokenModel)
+            query = query.filter_by(valid=True)
+            query = query.filter(TokenModel.expires > now)
+            if trust_id:
+                query = query.filter(TokenModel.trust_id == trust_id)
+            else:
+                query = query.filter(TokenModel.user_id == user_id)
+
+            for token_ref in query.all():
+                if tenant_id:
+                    token_ref_dict = token_ref.to_dict()
+                    if not self._tenant_matches(tenant_id, token_ref_dict):
+                        continue
+                if consumer_id:
+                    token_ref_dict = token_ref.to_dict()
+                    if not self._consumer_matches(consumer_id, token_ref_dict):
+                        continue
+
+                token_ref.valid = False
+
+    def _tenant_matches(self, tenant_id, token_ref_dict):
+        return ((tenant_id is None) or
+                (token_ref_dict.get('tenant') and
+                 token_ref_dict['tenant'].get('id') == tenant_id))
+
+    def _consumer_matches(self, consumer_id, ref):
+        if consumer_id is None:
+            return True
+        else:
+            try:
+                oauth = ref['token_data']['token'].get('OS-OAUTH1', {})
+                return oauth and oauth['consumer_id'] == consumer_id
+            except KeyError:
+                return False
+
+    def _list_tokens_for_trust(self, trust_id):
+        session = sql.get_session()
+        tokens = []
+        now = timeutils.utcnow()
+        query = session.query(TokenModel)
+        query = query.filter(TokenModel.expires > now)
+        query = query.filter(TokenModel.trust_id == trust_id)
+
+        token_references = query.filter_by(valid=True)
+        for token_ref in token_references:
+            token_ref_dict = token_ref.to_dict()
+            tokens.append(token_ref_dict['id'])
+        return tokens
+
+    def _list_tokens_for_user(self, user_id, tenant_id=None):
+        session = sql.get_session()
+        tokens = []
+        now = timeutils.utcnow()
+        query = session.query(TokenModel)
+        query = query.filter(TokenModel.expires > now)
+        query = query.filter(TokenModel.user_id == user_id)
+
+        token_references = query.filter_by(valid=True)
+        for token_ref in token_references:
+            token_ref_dict = token_ref.to_dict()
+            if self._tenant_matches(tenant_id, token_ref_dict):
+                tokens.append(token_ref['id'])
+        return tokens
+
+    def _list_tokens_for_consumer(self, user_id, consumer_id):
+        tokens = []
+        session = sql.get_session()
+        with session.begin():
+            now = timeutils.utcnow()
+            query = session.query(TokenModel)
+            query = query.filter(TokenModel.expires > now)
+            query = query.filter(TokenModel.user_id == user_id)
+            token_references = query.filter_by(valid=True)
+
+            for token_ref in token_references:
+                token_ref_dict = token_ref.to_dict()
+                if self._consumer_matches(consumer_id, token_ref_dict):
+                    tokens.append(token_ref_dict['id'])
+        return tokens
+
+    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
+                     consumer_id=None):
+        if not CONF.token.revoke_by_id:
+            return []
+        if trust_id:
+            return self._list_tokens_for_trust(trust_id)
+        if consumer_id:
+            return self._list_tokens_for_consumer(user_id, consumer_id)
+        else:
+            return self._list_tokens_for_user(user_id, tenant_id)
+
+    def list_revoked_tokens(self):
+        session = sql.get_session()
+        tokens = []
+        now = timeutils.utcnow()
+        query = session.query(TokenModel.id, TokenModel.expires)
+        query = query.filter(TokenModel.expires > now)
+        token_references = query.filter_by(valid=False)
+        for token_ref in token_references:
+            record = {
+                'id': token_ref[0],
+                'expires': token_ref[1],
+            }
+            tokens.append(record)
+        return tokens
+
+    def _expiry_range_strategy(self, dialect):
+        """Choose a token range expiration strategy
+
+        Based on the DB dialect, select an expiry range callable that is
+        appropriate.
+        """
+
+        # DB2 and MySQL can both benefit from a batched strategy.  On DB2 the
+        # transaction log can fill up and on MySQL w/Galera, large
+        # transactions can exceed the maximum write set size.
+        if dialect == 'ibm_db_sa':
+            # Limit of 100 is known to not fill a transaction log
+            # of default maximum size while not significantly
+            # impacting the performance of large token purges on
+            # systems where the maximum transaction log size has
+            # been increased beyond the default.
+            return functools.partial(_expiry_range_batched,
+                                     batch_size=100)
+        elif dialect == 'mysql':
+            # We want somewhat more than 100, since Galera replication delay is
+            # at least RTT*2.  This can be a significant amount of time if
+            # doing replication across a WAN.
+            return functools.partial(_expiry_range_batched,
+                                     batch_size=1000)
+        return _expiry_range_all
+
+    def flush_expired_tokens(self):
+        session = sql.get_session()
+        dialect = session.bind.dialect.name
+        expiry_range_func = self._expiry_range_strategy(dialect)
+        query = session.query(TokenModel.expires)
+        total_removed = 0
+        upper_bound_func = timeutils.utcnow
+        for expiry_time in expiry_range_func(session, upper_bound_func):
+            delete_query = query.filter(TokenModel.expires <=
+                                        expiry_time)
+            row_count = delete_query.delete(synchronize_session=False)
+            total_removed += row_count
+            LOG.debug('Removed %d total expired tokens', total_removed)
+
+        session.flush()
+        LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
diff --git a/keystone-moon/keystone/token/persistence/core.py b/keystone-moon/keystone/token/persistence/core.py
new file mode 100644 (file)
index 0000000..19f0df3
--- /dev/null
@@ -0,0 +1,361 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Token persistence service."""
+
+import abc
+import copy
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _LW
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+MEMOIZE = cache.get_memoization_decorator(section='token')
+REVOCATION_MEMOIZE = cache.get_memoization_decorator(
+    section='token', expiration_section='revoke')
+
+
+@dependency.requires('assignment_api', 'identity_api', 'resource_api',
+                     'token_provider_api', 'trust_api')
+class PersistenceManager(manager.Manager):
+    """Default pivot point for the Token backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+
+    def __init__(self):
+        super(PersistenceManager, self).__init__(CONF.token.driver)
+
+    def _assert_valid(self, token_id, token_ref):
+        """Raise TokenNotFound if the token is expired."""
+        current_time = timeutils.normalize_time(timeutils.utcnow())
+        expires = token_ref.get('expires')
+        if not expires or current_time > timeutils.normalize_time(expires):
+            raise exception.TokenNotFound(token_id=token_id)
+
+    def get_token(self, token_id):
+        if not token_id:
+            # NOTE(morganfainberg): There are cases when the
+            # context['token_id'] will in fact be None. This also saves
+            # a round-trip to the backend if we don't have a token_id.
+            raise exception.TokenNotFound(token_id='')
+        unique_id = self.token_provider_api.unique_id(token_id)
+        token_ref = self._get_token(unique_id)
+        # NOTE(morganfainberg): Lift expired checking to the manager, there is
+        # no reason to make the drivers implement this check. With caching,
+        # self._get_token could return an expired token. Make sure we behave
+        # as expected and raise TokenNotFound on those instances.
+        self._assert_valid(token_id, token_ref)
+        return token_ref
+
+    @MEMOIZE
+    def _get_token(self, token_id):
+        # Only ever use the "unique" id in the cache key.
+        return self.driver.get_token(token_id)
+
+    def create_token(self, token_id, data):
+        unique_id = self.token_provider_api.unique_id(token_id)
+        data_copy = copy.deepcopy(data)
+        data_copy['id'] = unique_id
+        ret = self.driver.create_token(unique_id, data_copy)
+        if MEMOIZE.should_cache(ret):
+            # NOTE(morganfainberg): when doing a cache set, you must pass the
+            # same arguments through as you would to invalidate (this includes
+            # "self"). The first argument is always the value to be cached.
+        return ret
+
+    def delete_token(self, token_id):
+        if not CONF.token.revoke_by_id:
+            return
+        unique_id = self.token_provider_api.unique_id(token_id)
+        self.driver.delete_token(unique_id)
+        self._invalidate_individual_token_cache(unique_id)
+        self.invalidate_revocation_list()
+
+    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
+                      consumer_id=None):
+        if not CONF.token.revoke_by_id:
+            return
+        token_list = self.driver._list_tokens(user_id, tenant_id, trust_id,
+                                              consumer_id)
+        self.driver.delete_tokens(user_id, tenant_id, trust_id, consumer_id)
+        for token_id in token_list:
+            unique_id = self.token_provider_api.unique_id(token_id)
+            self._invalidate_individual_token_cache(unique_id)
+        self.invalidate_revocation_list()
+
+    @REVOCATION_MEMOIZE
+    def list_revoked_tokens(self):
+        return self.driver.list_revoked_tokens()
+
+    def invalidate_revocation_list(self):
+        # NOTE(morganfainberg): ``self`` needs to be passed to invalidate()
+        # because of the way the invalidation method determines the cache
+        # keys.
+        self.list_revoked_tokens.invalidate(self)
+
+    def delete_tokens_for_domain(self, domain_id):
+        """Delete all tokens for a given domain.
+
+        It will delete all the project-scoped tokens for the projects
+        that are owned by the given domain, as well as any tokens issued
+        to users that are owned by this domain.
+
+        However, deletion of domain-scoped tokens will still need to be
+        implemented, as noted in the TODO below.
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        projects = self.resource_api.list_projects()
+        for project in projects:
+            if project['domain_id'] == domain_id:
+                for user_id in self.assignment_api.list_user_ids_for_project(
+                        project['id']):
+                    self.delete_tokens_for_user(user_id, project['id'])
+        # TODO(morganfainberg): implement deletion of domain_scoped tokens.
+
+        users = self.identity_api.list_users(domain_id)
+        user_ids = (user['id'] for user in users)
+        self.delete_tokens_for_users(user_ids)
+
+    def delete_tokens_for_user(self, user_id, project_id=None):
+        """Delete all tokens for a given user or user-project combination.
+
+        This method adds in the extra logic for handling trust-scoped token
+        revocations in a single call instead of needing to explicitly handle
+        trusts in the caller's logic.
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        self.delete_tokens(user_id, tenant_id=project_id)
+        for trust in self.trust_api.list_trusts_for_trustee(user_id):
+            # Ensure we revoke tokens associated to the trust / project
+            # user_id combination.
+            self.delete_tokens(user_id, trust_id=trust['id'],
+                               tenant_id=project_id)
+        for trust in self.trust_api.list_trusts_for_trustor(user_id):
+            # Ensure we revoke tokens associated to the trust / project /
+            # user_id combination where the user_id is the trustor.
+
+            # NOTE(morganfainberg): This revocation is a bit coarse, but it
+            # covers a number of cases such as disabling of the trustor user,
+            # deletion of the trustor user (for any number of reasons). It
+            # might make sense to refine this and be more surgical on the
+            # deletions (e.g. don't revoke tokens for the trusts when the
+            # trustor changes password). For now, to maintain previous
+            # functionality, this will continue to be a bit overzealous on
+            # revocations.
+            self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'],
+                               tenant_id=project_id)
+
+    def delete_tokens_for_users(self, user_ids, project_id=None):
+        """Delete all tokens for a list of user_ids.
+
+        :param user_ids: list of user identifiers
+        :param project_id: optional project identifier
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        for user_id in user_ids:
+            self.delete_tokens_for_user(user_id, project_id=project_id)
+
+    def _invalidate_individual_token_cache(self, token_id):
+        # NOTE(morganfainberg): invalidate takes the exact same arguments as
+        # the normal method; this means we need to pass "self" in (which gets
+        # stripped off).
+
+        # FIXME(morganfainberg): Does this cache actually need to be
+        # invalidated? We maintain a cached revocation list, which should be
+        # consulted before accepting a token as valid.  For now we will
+        # do the explicit individual token invalidation.
+        self._get_token.invalidate(self, token_id)
+        self.token_provider_api.invalidate_individual_token_cache(token_id)
+
+
+# NOTE(morganfainberg): @dependency.optional() is required here to ensure the
+# class-level optional dependency control attribute is populated as empty.
+# This is because of the override of .__getattr__ and ensures that if the
+# optional dependency injector changes attributes, this class doesn't break.
+@dependency.optional()
+@dependency.requires('token_provider_api')
+@dependency.provider('token_api')
+class Manager(object):
+    """The token_api provider.
+
+    This class is a proxy class to the token_provider_api's persistence
+    manager.
+    """
+    def __init__(self):
+        # NOTE(morganfainberg): __init__ is required for dependency processing.
+        super(Manager, self).__init__()
+
+    def __getattr__(self, item):
+        """Forward calls to the `token_provider_api` persistence manager."""
+
+        # NOTE(morganfainberg): Prevent infinite recursion, raise an
+        # AttributeError for 'token_provider_api' ensuring that the dep
+        # injection doesn't infinitely try to look up self.token_provider_api
+        # on _process_dependencies. This doesn't need an exception string as
+        # it should only ever be hit on instantiation.
+        if item == 'token_provider_api':
+            raise AttributeError()
+
+        f = getattr(self.token_provider_api._persistence, item)
+        LOG.warning(_LW('`token_api.%s` is deprecated as of Juno in favor of '
+                        'utilizing methods on `token_provider_api` and may be '
+                        'removed in Kilo.'), item)
+        setattr(self, item, f)
+        return f
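+
+    # Illustrative: a legacy call such as token_api.get_token(token_id) is
+    # resolved here once, logged as deprecated, and then cached on the
+    # instance via setattr() so subsequent calls bypass __getattr__.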
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+    """Interface description for a Token driver."""
+
+    @abc.abstractmethod
+    def get_token(self, token_id):
+        """Get a token by id.
+
+        :param token_id: identity of the token
+        :type token_id: string
+        :returns: token_ref
+        :raises: keystone.exception.TokenNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def create_token(self, token_id, data):
+        """Create a token by id and data.
+
+        :param token_id: identity of the token
+        :type token_id: string
+        :param data: dictionary with additional reference information
+
+        ::
+
+            {
+                expires=''
+                id=token_id,
+                user=user_ref,
+                tenant=tenant_ref,
+                metadata=metadata_ref
+            }
+
+        :type data: dict
+        :returns: token_ref or None.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_token(self, token_id):
+        """Deletes a token by id.
+
+        :param token_id: identity of the token
+        :type token_id: string
+        :returns: None.
+        :raises: keystone.exception.TokenNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
+                      consumer_id=None):
+        """Deletes tokens by user.
+
+        If the tenant_id is not None, only delete the tokens by user id under
+        the specified tenant.
+
+        If the trust_id is not None, it will be used to query tokens and the
+        user_id will be ignored.
+
+        If the consumer_id is not None, only delete the tokens by consumer id
+        that match the specified consumer id.
+
+        :param user_id: identity of user
+        :type user_id: string
+        :param tenant_id: identity of the tenant
+        :type tenant_id: string
+        :param trust_id: identity of the trust
+        :type trust_id: string
+        :param consumer_id: identity of the consumer
+        :type consumer_id: string
+        :returns: None.
+        :raises: keystone.exception.TokenNotFound
+
+        """
+        if not CONF.token.revoke_by_id:
+            return
+        token_list = self._list_tokens(user_id,
+                                       tenant_id=tenant_id,
+                                       trust_id=trust_id,
+                                       consumer_id=consumer_id)
+
+        for token in token_list:
+            try:
+                self.delete_token(token)
+            except exception.NotFound:
+                pass
+
+    @abc.abstractmethod
+    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
+                     consumer_id=None):
+        """Returns a list of current token_id's for a user
+
+        This is effectively a private method only used by the ``delete_tokens``
+        method and should not be called by anything outside of the
+        ``token_api`` manager or the token driver itself.
+
+        :param user_id: identity of the user
+        :type user_id: string
+        :param tenant_id: identity of the tenant
+        :type tenant_id: string
+        :param trust_id: identity of the trust
+        :type trust_id: string
+        :param consumer_id: identity of the consumer
+        :type consumer_id: string
+        :returns: list of token_ids
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_revoked_tokens(self):
+        """Returns a list of all revoked tokens
+
+        :returns: list of token_id's
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def flush_expired_tokens(self):
+        """Archive or delete tokens that have expired.
+        """
+        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/token/provider.py b/keystone-moon/keystone/token/provider.py
new file mode 100644 (file)
index 0000000..fb41d4b
--- /dev/null
@@ -0,0 +1,584 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Token provider interface."""
+
+import abc
+import base64
+import datetime
+import sys
+import uuid
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _, _LE
+from keystone.models import token_model
+from keystone import notifications
+from keystone.token import persistence
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+MEMOIZE = cache.get_memoization_decorator(section='token')
+
+# NOTE(morganfainberg): This is for compatibility in case someone was relying
+# on the old location of the UnsupportedTokenVersionException for their code.
+UnsupportedTokenVersionException = exception.UnsupportedTokenVersionException
+
+# supported token versions
+V2 = token_model.V2
+V3 = token_model.V3
+VERSIONS = token_model.VERSIONS
+
+
+def base64_encode(s):
+    """Encode a URL-safe string."""
+    return base64.urlsafe_b64encode(s).rstrip('=')
+
+
+def random_urlsafe_str():
+    """Generate a random URL-safe string."""
+    # chop the padding (==) off the end of the encoding to save space
+    return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]
+
+
+def random_urlsafe_str_to_bytes(s):
+    """Convert a string generated by ``random_urlsafe_str()`` to bytes."""
+    # restore the padding (==) at the end of the string
+    return base64.urlsafe_b64decode(s + '==')
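+
+# Illustrative round trip: random_urlsafe_str() returns a 22-character string
+# (24 base64 characters minus the '==' padding), and
+# random_urlsafe_str_to_bytes() restores the original 16 uuid4 bytes.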
+
+
+def default_expire_time():
+    """Determine when a fresh token should expire.
+
+    Expiration time varies based on configuration (see ``[token] expiration``).
+
+    :returns: a naive UTC datetime.datetime object
+
+    """
+    expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
+    return timeutils.utcnow() + expire_delta
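+
+# For example, with the default ``[token] expiration = 3600`` this returns a
+# naive UTC datetime one hour in the future.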
+
+
+def audit_info(parent_audit_id):
+    """Build the audit data for a token.
+
+    If ``parent_audit_id`` is None, the list will be one element in length
+    containing a newly generated audit_id.
+
+    If ``parent_audit_id`` is supplied, the list will be two elements in length
+    containing a newly generated audit_id and the ``parent_audit_id``. The
+    ``parent_audit_id`` will always be element index 1 in the resulting
+    list.
+
+    :param parent_audit_id: the audit of the original token in the chain
+    :type parent_audit_id: str
+    :returns: Keystone token audit data
+    """
+    audit_id = random_urlsafe_str()
+    if parent_audit_id is not None:
+        return [audit_id, parent_audit_id]
+    return [audit_id]
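+
+# Illustrative: audit_info(None) returns ['<new_audit_id>'], while for a
+# rescoped token audit_info('abc') returns ['<new_audit_id>', 'abc'], keeping
+# the parent audit_id at index 1.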
+
+
+@dependency.provider('token_provider_api')
+@dependency.requires('assignment_api', 'revoke_api')
+class Manager(manager.Manager):
+    """Default pivot point for the token provider backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+
+    V2 = V2
+    V3 = V3
+    VERSIONS = VERSIONS
+    INVALIDATE_PROJECT_TOKEN_PERSISTENCE = 'invalidate_project_tokens'
+    INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens'
+    _persistence_manager = None
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.token.provider)
+        self._register_callback_listeners()
+
+    def _register_callback_listeners(self):
+        # This is used by the @dependency.provider decorator to register the
+        # provider (token_provider_api) manager to listen for trust deletions.
+        callbacks = {
+            notifications.ACTIONS.deleted: [
+                ['OS-TRUST:trust', self._trust_deleted_event_callback],
+                ['user', self._delete_user_tokens_callback],
+                ['domain', self._delete_domain_tokens_callback],
+            ],
+            notifications.ACTIONS.disabled: [
+                ['user', self._delete_user_tokens_callback],
+                ['domain', self._delete_domain_tokens_callback],
+                ['project', self._delete_project_tokens_callback],
+            ],
+            notifications.ACTIONS.internal: [
+                [notifications.INVALIDATE_USER_TOKEN_PERSISTENCE,
+                    self._delete_user_tokens_callback],
+                [notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE,
+                    self._delete_user_project_tokens_callback],
+                [notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS,
+                    self._delete_user_oauth_consumer_tokens_callback],
+            ]
+        }
+
+        for event, cb_info in six.iteritems(callbacks):
+            for resource_type, callback_fns in cb_info:
+                notifications.register_event_callback(event, resource_type,
+                                                      callback_fns)
+
+    @property
+    def _needs_persistence(self):
+        return self.driver.needs_persistence()
+
+    @property
+    def _persistence(self):
+        # NOTE(morganfainberg): This should not be handled via __init__ to
+        # avoid dependency injection oddities and circular dependencies (where
+        # the provider manager requires the token persistence manager, which
+        # requires the token provider manager).
+        if self._persistence_manager is None:
+            self._persistence_manager = persistence.PersistenceManager()
+        return self._persistence_manager
+
+    def unique_id(self, token_id):
+        """Return a unique ID for a token.
+
+        The returned value is useful as the primary key of a database table,
+        memcache store, or other lookup table.
+
+        :returns: Given a PKI token, returns its hashed value. Otherwise,
+                  returns the passed-in value (such as a UUID token ID or an
+                  existing hash).
+        """
+        return cms.cms_hash_token(token_id, mode=CONF.token.hash_algorithm)
+
+    def _create_token(self, token_id, token_data):
+        try:
+            if isinstance(token_data['expires'], six.string_types):
+                token_data['expires'] = timeutils.normalize_time(
+                    timeutils.parse_isotime(token_data['expires']))
+            self._persistence.create_token(token_id, token_data)
+        except Exception:
+            exc_info = sys.exc_info()
+            # an identical token may have been created already.
+            # if so, return the token_data as it is also identical
+            try:
+                self._persistence.get_token(token_id)
+            except exception.TokenNotFound:
+                six.reraise(*exc_info)
+
+    def validate_token(self, token_id, belongs_to=None):
+        unique_id = self.unique_id(token_id)
+        # NOTE(morganfainberg): Ensure we never use the long-form token_id
+        # (PKI) as part of the cache_key.
+        token = self._validate_token(unique_id)
+        self._token_belongs_to(token, belongs_to)
+        self._is_valid_token(token)
+        return token
+
+    def check_revocation_v2(self, token):
+        try:
+            token_data = token['access']
+        except KeyError:
+            raise exception.TokenNotFound(_('Failed to validate token'))
+
+        token_values = self.revoke_api.model.build_token_values_v2(
+            token_data, CONF.identity.default_domain_id)
+        self.revoke_api.check_token(token_values)
+
+    def validate_v2_token(self, token_id, belongs_to=None):
+        unique_id = self.unique_id(token_id)
+        if self._needs_persistence:
+            # NOTE(morganfainberg): Ensure we never use the long-form token_id
+            # (PKI) as part of the cache_key.
+            token_ref = self._persistence.get_token(unique_id)
+        else:
+            token_ref = token_id
+        token = self._validate_v2_token(token_ref)
+        self._token_belongs_to(token, belongs_to)
+        self._is_valid_token(token)
+        return token
+
+    def check_revocation_v3(self, token):
+        try:
+            token_data = token['token']
+        except KeyError:
+            raise exception.TokenNotFound(_('Failed to validate token'))
+        token_values = self.revoke_api.model.build_token_values(token_data)
+        self.revoke_api.check_token(token_values)
+
+    def check_revocation(self, token):
+        version = self.driver.get_token_version(token)
+        if version == V2:
+            return self.check_revocation_v2(token)
+        else:
+            return self.check_revocation_v3(token)
+
+    def validate_v3_token(self, token_id):
+        unique_id = self.unique_id(token_id)
+        # NOTE(lbragstad): Only go to persistent storage if we have a token to
+        # fetch from the backend. If the Fernet token provider is being used,
+        # this step isn't necessary. The Fernet token reference is persisted
+        # in the token_id, so in this case set the token_ref as the identifier
+        # of the token.
+        if not self._needs_persistence:
+            token_ref = token_id
+        else:
+            # NOTE(morganfainberg): Ensure we never use the long-form token_id
+            # (PKI) as part of the cache_key.
+            token_ref = self._persistence.get_token(unique_id)
+        token = self._validate_v3_token(token_ref)
+        self._is_valid_token(token)
+        return token
+
+    @MEMOIZE
+    def _validate_token(self, token_id):
+        if not self._needs_persistence:
+            return self.driver.validate_v3_token(token_id)
+        token_ref = self._persistence.get_token(token_id)
+        version = self.driver.get_token_version(token_ref)
+        if version == self.V3:
+            return self.driver.validate_v3_token(token_ref)
+        elif version == self.V2:
+            return self.driver.validate_v2_token(token_ref)
+        raise exception.UnsupportedTokenVersionException()
+
+    @MEMOIZE
+    def _validate_v2_token(self, token_id):
+        return self.driver.validate_v2_token(token_id)
+
+    @MEMOIZE
+    def _validate_v3_token(self, token_id):
+        return self.driver.validate_v3_token(token_id)
+
+    def _is_valid_token(self, token):
+        """Verify the token is valid format and has not expired."""
+
+        current_time = timeutils.normalize_time(timeutils.utcnow())
+
+        try:
+            # Get the data we need from the correct location (V2 and V3 tokens
+            # differ in structure; try V3 first, then fall back to V2).
+            token_data = token.get('token', token.get('access'))
+            expires_at = token_data.get('expires_at',
+                                        token_data.get('expires'))
+            if not expires_at:
+                expires_at = token_data['token']['expires']
+            expiry = timeutils.normalize_time(
+                timeutils.parse_isotime(expires_at))
+        except Exception:
+            LOG.exception(_LE('Unexpected error or malformed token '
+                              'determining token expiry: %s'), token)
+            raise exception.TokenNotFound(_('Failed to validate token'))
+
+        if current_time < expiry:
+            self.check_revocation(token)
+            # Token has not expired and has not been revoked.
+            return None
+        else:
+            raise exception.TokenNotFound(_('Failed to validate token'))
+
+    def _token_belongs_to(self, token, belongs_to):
+        """Check if the token belongs to the right tenant.
+
+        This is only used on v2 tokens.  The structural validity of the token
+        will have already been checked before this method is called.
+
+        """
+        if belongs_to:
+            token_data = token['access']['token']
+            if ('tenant' not in token_data or
+                    token_data['tenant']['id'] != belongs_to):
+                raise exception.Unauthorized()
+
+    def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
+        token_id, token_data = self.driver.issue_v2_token(
+            token_ref, roles_ref, catalog_ref)
+
+        if self._needs_persistence:
+            data = dict(key=token_id,
+                        id=token_id,
+                        expires=token_data['access']['token']['expires'],
+                        user=token_ref['user'],
+                        tenant=token_ref['tenant'],
+                        metadata=token_ref['metadata'],
+                        token_data=token_data,
+                        bind=token_ref.get('bind'),
+                        trust_id=token_ref['metadata'].get('trust_id'),
+                        token_version=self.V2)
+            self._create_token(token_id, data)
+
+        return token_id, token_data
+
+    def issue_v3_token(self, user_id, method_names, expires_at=None,
+                       project_id=None, domain_id=None, auth_context=None,
+                       trust=None, metadata_ref=None, include_catalog=True,
+                       parent_audit_id=None):
+        token_id, token_data = self.driver.issue_v3_token(
+            user_id, method_names, expires_at, project_id, domain_id,
+            auth_context, trust, metadata_ref, include_catalog,
+            parent_audit_id)
+
+        if metadata_ref is None:
+            metadata_ref = {}
+
+        if 'project' in token_data['token']:
+            # project-scoped token, fill in the v2 token data
+            # all we care are the role IDs
+
+            # FIXME(gyee): is there really a need to store roles in metadata?
+            role_ids = [r['id'] for r in token_data['token']['roles']]
+            metadata_ref = {'roles': role_ids}
+
+        if trust:
+            metadata_ref.setdefault('trust_id', trust['id'])
+            metadata_ref.setdefault('trustee_user_id',
+                                    trust['trustee_user_id'])
+
+        data = dict(key=token_id,
+                    id=token_id,
+                    expires=token_data['token']['expires_at'],
+                    user=token_data['token']['user'],
+                    tenant=token_data['token'].get('project'),
+                    metadata=metadata_ref,
+                    token_data=token_data,
+                    trust_id=trust['id'] if trust else None,
+                    token_version=self.V3)
+        if self._needs_persistence:
+            self._create_token(token_id, data)
+        return token_id, token_data
+
+    def invalidate_individual_token_cache(self, token_id):
+        # NOTE(morganfainberg): invalidate takes the exact same arguments as
+        # the normal method; this means we need to pass "self" in (which gets
+        # stripped off).
+
+        # FIXME(morganfainberg): Does this cache actually need to be
+        # invalidated? We maintain a cached revocation list, which should be
+        # consulted before accepting a token as valid.  For now we will
+        # do the explicit individual token invalidation.
+
+        self._validate_token.invalidate(self, token_id)
+        self._validate_v2_token.invalidate(self, token_id)
+        self._validate_v3_token.invalidate(self, token_id)
+
+    def revoke_token(self, token_id, revoke_chain=False):
+        revoke_by_expires = False
+        project_id = None
+        domain_id = None
+
+        token_ref = token_model.KeystoneToken(
+            token_id=token_id,
+            token_data=self.validate_token(token_id))
+
+        user_id = token_ref.user_id
+        expires_at = token_ref.expires
+        audit_id = token_ref.audit_id
+        audit_chain_id = token_ref.audit_chain_id
+        if token_ref.project_scoped:
+            project_id = token_ref.project_id
+        if token_ref.domain_scoped:
+            domain_id = token_ref.domain_id
+
+        if audit_id is None and not revoke_chain:
+            LOG.debug('Received token with no audit_id.')
+            revoke_by_expires = True
+
+        if audit_chain_id is None and revoke_chain:
+            LOG.debug('Received token with no audit_chain_id.')
+            revoke_by_expires = True
+
+        if revoke_by_expires:
+            self.revoke_api.revoke_by_expiration(user_id, expires_at,
+                                                 project_id=project_id,
+                                                 domain_id=domain_id)
+        elif revoke_chain:
+            self.revoke_api.revoke_by_audit_chain_id(audit_chain_id,
+                                                     project_id=project_id,
+                                                     domain_id=domain_id)
+        else:
+            self.revoke_api.revoke_by_audit_id(audit_id)
+
+        if CONF.token.revoke_by_id and self._needs_persistence:
+            self._persistence.delete_token(token_id=token_id)
+
+    def list_revoked_tokens(self):
+        return self._persistence.list_revoked_tokens()
+
+    def _trust_deleted_event_callback(self, service, resource_type, operation,
+                                      payload):
+        if CONF.token.revoke_by_id:
+            trust_id = payload['resource_info']
+            trust = self.trust_api.get_trust(trust_id, deleted=True)
+            self._persistence.delete_tokens(user_id=trust['trustor_user_id'],
+                                            trust_id=trust_id)
+
+    def _delete_user_tokens_callback(self, service, resource_type, operation,
+                                     payload):
+        if CONF.token.revoke_by_id:
+            user_id = payload['resource_info']
+            self._persistence.delete_tokens_for_user(user_id)
+
+    def _delete_domain_tokens_callback(self, service, resource_type,
+                                       operation, payload):
+        if CONF.token.revoke_by_id:
+            domain_id = payload['resource_info']
+            self._persistence.delete_tokens_for_domain(domain_id=domain_id)
+
+    def _delete_user_project_tokens_callback(self, service, resource_type,
+                                             operation, payload):
+        if CONF.token.revoke_by_id:
+            user_id = payload['resource_info']['user_id']
+            project_id = payload['resource_info']['project_id']
+            self._persistence.delete_tokens_for_user(user_id=user_id,
+                                                     project_id=project_id)
+
+    def _delete_project_tokens_callback(self, service, resource_type,
+                                        operation, payload):
+        if CONF.token.revoke_by_id:
+            project_id = payload['resource_info']
+            self._persistence.delete_tokens_for_users(
+                self.assignment_api.list_user_ids_for_project(project_id),
+                project_id=project_id)
+
+    def _delete_user_oauth_consumer_tokens_callback(self, service,
+                                                    resource_type, operation,
+                                                    payload):
+        if CONF.token.revoke_by_id:
+            user_id = payload['resource_info']['user_id']
+            consumer_id = payload['resource_info']['consumer_id']
+            self._persistence.delete_tokens(user_id=user_id,
+                                            consumer_id=consumer_id)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Provider(object):
+    """Interface description for a Token provider."""
+
+    @abc.abstractmethod
+    def needs_persistence(self):
+        """Determine if the token should be persisted.
+
+        If the token provider requires that the token be persisted to a
+        backend, this should return True; otherwise return False.
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_token_version(self, token_data):
+        """Return the version of the given token data.
+
+        If the given token data is unrecognizable,
+        UnsupportedTokenVersionException is raised.
+
+        :param token_data: token_data
+        :type token_data: dict
+        :returns: token version string
+        :raises: keystone.token.provider.UnsupportedTokenVersionException
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
+        """Issue a V2 token.
+
+        :param token_ref: token data to generate token from
+        :type token_ref: dict
+        :param roles_ref: optional roles list
+        :type roles_ref: list
+        :param catalog_ref: optional catalog information
+        :type catalog_ref: dict
+        :returns: (token_id, token_data)
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def issue_v3_token(self, user_id, method_names, expires_at=None,
+                       project_id=None, domain_id=None, auth_context=None,
+                       trust=None, metadata_ref=None, include_catalog=True,
+                       parent_audit_id=None):
+        """Issue a V3 Token.
+
+        :param user_id: identity of the user
+        :type user_id: string
+        :param method_names: names of authentication methods
+        :type method_names: list
+        :param expires_at: optional time the token will expire
+        :type expires_at: string
+        :param project_id: optional project identity
+        :type project_id: string
+        :param domain_id: optional domain identity
+        :type domain_id: string
+        :param auth_context: optional context from the authorization plugins
+        :type auth_context: dict
+        :param trust: optional trust reference
+        :type trust: dict
+        :param metadata_ref: optional metadata reference
+        :type metadata_ref: dict
+        :param include_catalog: optional, include the catalog in token data
+        :type include_catalog: boolean
+        :param parent_audit_id: optional, the audit id of the parent token
+        :type parent_audit_id: string
+        :returns: (token_id, token_data)
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def validate_v2_token(self, token_ref):
+        """Validate the given V2 token and return the token data.
+
+        Must raise Unauthorized exception if unable to validate token.
+
+        :param token_ref: the token reference
+        :type token_ref: dict
+        :returns: token data
+        :raises: keystone.exception.TokenNotFound
+
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def validate_v3_token(self, token_ref):
+        """Validate the given V3 token and return the token_data.
+
+        :param token_ref: the token reference
+        :type token_ref: dict
+        :returns: token data
+        :raises: keystone.exception.TokenNotFound
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def _get_token_id(self, token_data):
+        """Generate the token_id based upon the data in token_data.
+
+        :param token_data: token information
+        :type token_data: dict
+        :returns: token identifier
+        """
+        raise exception.NotImplemented()  # pragma: no cover
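+
+# A minimal sketch of how this interface is meant to be implemented
+# (illustrative only; method bodies elided). The concrete providers in
+# keystone.token.providers.common and keystone.token.providers.fernet
+# below follow this pattern:
+#
+#     class MyProvider(Provider):
+#         def needs_persistence(self):
+#             return True
+#
+#         def get_token_version(self, token_data):
+#             ...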
diff --git a/keystone-moon/keystone/token/providers/__init__.py b/keystone-moon/keystone/token/providers/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/token/providers/common.py b/keystone-moon/keystone/token/providers/common.py
new file mode 100644 (file)
index 0000000..717e149
--- /dev/null
@@ -0,0 +1,709 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+import six
+from six.moves.urllib import parse
+
+from keystone.common import controller as common_controller
+from keystone.common import dependency
+from keystone.contrib import federation
+from keystone import exception
+from keystone.i18n import _, _LE
+from keystone.openstack.common import versionutils
+from keystone import token
+from keystone.token import provider
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+@dependency.requires('catalog_api', 'resource_api')
+class V2TokenDataHelper(object):
+    """Creates V2 token data."""
+
+    def v3_to_v2_token(self, token_id, v3_token_data):
+        token_data = {}
+        # Build v2 token
+        v3_token = v3_token_data['token']
+
+        token = {}
+        token['id'] = token_id
+        token['expires'] = v3_token.get('expires_at')
+        token['issued_at'] = v3_token.get('issued_at')
+        token['audit_ids'] = v3_token.get('audit_ids')
+
+        if 'project' in v3_token:
+            # v3 token_data does not contain all tenant attributes
+            tenant = self.resource_api.get_project(
+                v3_token['project']['id'])
+            token['tenant'] = common_controller.V2Controller.filter_domain_id(
+                tenant)
+        token_data['token'] = token
+
+        # Build v2 user
+        v3_user = v3_token['user']
+        user = common_controller.V2Controller.v3_to_v2_user(v3_user)
+
+        # Set user roles
+        user['roles'] = []
+        role_ids = []
+        for role in v3_token.get('roles', []):
+            # Filter role id since it's not included in v2 token response
+            role_ids.append(role.pop('id'))
+            user['roles'].append(role)
+        user['roles_links'] = []
+
+        token_data['user'] = user
+
+        # Get and build v2 service catalog
+        token_data['serviceCatalog'] = []
+        if 'tenant' in token:
+            catalog_ref = self.catalog_api.get_catalog(
+                user['id'], token['tenant']['id'])
+            if catalog_ref:
+                token_data['serviceCatalog'] = self.format_catalog(catalog_ref)
+
+        # Build v2 metadata
+        metadata = {}
+        metadata['roles'] = role_ids
+        # Setting is_admin to keep consistency in v2 response
+        metadata['is_admin'] = 0
+        token_data['metadata'] = metadata
+
+        return {'access': token_data}
+
+    @classmethod
+    def format_token(cls, token_ref, roles_ref=None, catalog_ref=None,
+                     trust_ref=None):
+        audit_info = None
+        user_ref = token_ref['user']
+        metadata_ref = token_ref['metadata']
+        if roles_ref is None:
+            roles_ref = []
+        expires = token_ref.get('expires', provider.default_expire_time())
+        if expires is not None:
+            if not isinstance(expires, six.text_type):
+                expires = timeutils.isotime(expires)
+
+        token_data = token_ref.get('token_data')
+        if token_data:
+            token_audit = token_data.get(
+                'access', token_data).get('token', {}).get('audit_ids')
+            audit_info = token_audit
+
+        if audit_info is None:
+            audit_info = provider.audit_info(token_ref.get('parent_audit_id'))
+
+        o = {'access': {'token': {'id': token_ref['id'],
+                                  'expires': expires,
+                                  'issued_at': timeutils.strtime(),
+                                  'audit_ids': audit_info
+                                  },
+                        'user': {'id': user_ref['id'],
+                                 'name': user_ref['name'],
+                                 'username': user_ref['name'],
+                                 'roles': roles_ref,
+                                 'roles_links': metadata_ref.get('roles_links',
+                                                                 [])
+                                 }
+                        }
+             }
+        if 'bind' in token_ref:
+            o['access']['token']['bind'] = token_ref['bind']
+        if 'tenant' in token_ref and token_ref['tenant']:
+            token_ref['tenant']['enabled'] = True
+            o['access']['token']['tenant'] = token_ref['tenant']
+        if catalog_ref is not None:
+            o['access']['serviceCatalog'] = V2TokenDataHelper.format_catalog(
+                catalog_ref)
+        if metadata_ref:
+            if 'is_admin' in metadata_ref:
+                o['access']['metadata'] = {'is_admin':
+                                           metadata_ref['is_admin']}
+            else:
+                o['access']['metadata'] = {'is_admin': 0}
+        if 'roles' in metadata_ref:
+            o['access']['metadata']['roles'] = metadata_ref['roles']
+        if CONF.trust.enabled and trust_ref:
+            o['access']['trust'] = {'trustee_user_id':
+                                    trust_ref['trustee_user_id'],
+                                    'id': trust_ref['id'],
+                                    'trustor_user_id':
+                                    trust_ref['trustor_user_id'],
+                                    'impersonation':
+                                    trust_ref['impersonation']
+                                    }
+        return o
+
+    @classmethod
+    def format_catalog(cls, catalog_ref):
+        """Munge catalogs from internal to output format
+        Internal catalogs look like::
+
+          {$REGION: {
+              {$SERVICE: {
+                  $key1: $value1,
+                  ...
+                  }
+              }
+          }
+
+        The legacy api wants them to look like::
+
+          [{'name': $SERVICE[name],
+            'type': $SERVICE,
+            'endpoints': [{
+                'tenantId': $tenant_id,
+                ...
+                'region': $REGION,
+                }],
+            'endpoints_links': [],
+           }]
+
+        """
+        if not catalog_ref:
+            return []
+
+        services = {}
+        for region, region_ref in six.iteritems(catalog_ref):
+            for service, service_ref in six.iteritems(region_ref):
+                new_service_ref = services.get(service, {})
+                new_service_ref['name'] = service_ref.pop('name')
+                new_service_ref['type'] = service
+                new_service_ref['endpoints_links'] = []
+                service_ref['region'] = region
+
+                endpoints_ref = new_service_ref.get('endpoints', [])
+                endpoints_ref.append(service_ref)
+
+                new_service_ref['endpoints'] = endpoints_ref
+                services[service] = new_service_ref
+
+        return services.values()
+
+
+@dependency.requires('assignment_api', 'catalog_api', 'federation_api',
+                     'identity_api', 'resource_api', 'role_api', 'trust_api')
+class V3TokenDataHelper(object):
+    """Token data helper."""
+    def __init__(self):
+        # Keep __init__ around to ensure dependency injection works.
+        super(V3TokenDataHelper, self).__init__()
+
+    def _get_filtered_domain(self, domain_id):
+        domain_ref = self.resource_api.get_domain(domain_id)
+        return {'id': domain_ref['id'], 'name': domain_ref['name']}
+
+    def _get_filtered_project(self, project_id):
+        project_ref = self.resource_api.get_project(project_id)
+        filtered_project = {
+            'id': project_ref['id'],
+            'name': project_ref['name']}
+        filtered_project['domain'] = self._get_filtered_domain(
+            project_ref['domain_id'])
+        return filtered_project
+
+    def _populate_scope(self, token_data, domain_id, project_id):
+        if 'domain' in token_data or 'project' in token_data:
+            # scope already exists, no need to populate it again
+            return
+
+        if domain_id:
+            token_data['domain'] = self._get_filtered_domain(domain_id)
+        if project_id:
+            token_data['project'] = self._get_filtered_project(project_id)
+
+    def _get_roles_for_user(self, user_id, domain_id, project_id):
+        roles = []
+        if domain_id:
+            roles = self.assignment_api.get_roles_for_user_and_domain(
+                user_id, domain_id)
+        if project_id:
+            roles = self.assignment_api.get_roles_for_user_and_project(
+                user_id, project_id)
+        return [self.role_api.get_role(role_id) for role_id in roles]
+
+    def _populate_roles_for_groups(self, group_ids,
+                                   project_id=None, domain_id=None,
+                                   user_id=None):
+        def _check_roles(roles, user_id, project_id, domain_id):
+            # User was granted roles so simply exit this function.
+            if roles:
+                return
+            if project_id:
+                msg = _('User %(user_id)s has no access '
+                        'to project %(project_id)s') % {
+                            'user_id': user_id,
+                            'project_id': project_id}
+            elif domain_id:
+                msg = _('User %(user_id)s has no access '
+                        'to domain %(domain_id)s') % {
+                            'user_id': user_id,
+                            'domain_id': domain_id}
+            # Since no roles were found, the user is not authorized to
+            # perform any operations. Raise an exception with an
+            # appropriate error message.
+            raise exception.Unauthorized(msg)
+
+        roles = self.assignment_api.get_roles_for_groups(group_ids,
+                                                         project_id,
+                                                         domain_id)
+        _check_roles(roles, user_id, project_id, domain_id)
+        return roles
+
+    def _populate_user(self, token_data, user_id, trust):
+        if 'user' in token_data:
+            # no need to repopulate user if it already exists
+            return
+
+        user_ref = self.identity_api.get_user(user_id)
+        if CONF.trust.enabled and trust and 'OS-TRUST:trust' not in token_data:
+            trustor_user_ref = (self.identity_api.get_user(
+                                trust['trustor_user_id']))
+            try:
+                self.identity_api.assert_user_enabled(trust['trustor_user_id'])
+            except AssertionError:
+                raise exception.Forbidden(_('Trustor is disabled.'))
+            if trust['impersonation']:
+                user_ref = trustor_user_ref
+            token_data['OS-TRUST:trust'] = (
+                {
+                    'id': trust['id'],
+                    'trustor_user': {'id': trust['trustor_user_id']},
+                    'trustee_user': {'id': trust['trustee_user_id']},
+                    'impersonation': trust['impersonation']
+                })
+        filtered_user = {
+            'id': user_ref['id'],
+            'name': user_ref['name'],
+            'domain': self._get_filtered_domain(user_ref['domain_id'])}
+        token_data['user'] = filtered_user
+
+    def _populate_oauth_section(self, token_data, access_token):
+        if access_token:
+            access_token_id = access_token['id']
+            consumer_id = access_token['consumer_id']
+            token_data['OS-OAUTH1'] = ({'access_token_id': access_token_id,
+                                        'consumer_id': consumer_id})
+
+    def _populate_roles(self, token_data, user_id, domain_id, project_id,
+                        trust, access_token):
+        if 'roles' in token_data:
+            # no need to repopulate roles
+            return
+
+        if access_token:
+            filtered_roles = []
+            authed_role_ids = jsonutils.loads(access_token['role_ids'])
+            all_roles = self.role_api.list_roles()
+            for role in all_roles:
+                for authed_role in authed_role_ids:
+                    if authed_role == role['id']:
+                        filtered_roles.append({'id': role['id'],
+                                               'name': role['name']})
+            token_data['roles'] = filtered_roles
+            return
+
+        if CONF.trust.enabled and trust:
+            token_user_id = trust['trustor_user_id']
+            token_project_id = trust['project_id']
+            # trusts do not support domains yet
+            token_domain_id = None
+        else:
+            token_user_id = user_id
+            token_project_id = project_id
+            token_domain_id = domain_id
+
+        if token_domain_id or token_project_id:
+            roles = self._get_roles_for_user(token_user_id,
+                                             token_domain_id,
+                                             token_project_id)
+            filtered_roles = []
+            if CONF.trust.enabled and trust:
+                for trust_role in trust['roles']:
+                    match_roles = [x for x in roles
+                                   if x['id'] == trust_role['id']]
+                    if match_roles:
+                        filtered_roles.append(match_roles[0])
+                    else:
+                        raise exception.Forbidden(
+                            _('Trustee has no delegated roles.'))
+            else:
+                for role in roles:
+                    filtered_roles.append({'id': role['id'],
+                                           'name': role['name']})
+
+            # user has no project or domain roles, therefore access denied
+            if not filtered_roles:
+                if token_project_id:
+                    msg = _('User %(user_id)s has no access '
+                            'to project %(project_id)s') % {
+                                'user_id': user_id,
+                                'project_id': token_project_id}
+                else:
+                    msg = _('User %(user_id)s has no access '
+                            'to domain %(domain_id)s') % {
+                                'user_id': user_id,
+                                'domain_id': token_domain_id}
+                LOG.debug(msg)
+                raise exception.Unauthorized(msg)
+
+            token_data['roles'] = filtered_roles
+
+    def _populate_service_catalog(self, token_data, user_id,
+                                  domain_id, project_id, trust):
+        if 'catalog' in token_data:
+            # no need to repopulate service catalog
+            return
+
+        if CONF.trust.enabled and trust:
+            user_id = trust['trustor_user_id']
+        if project_id or domain_id:
+            service_catalog = self.catalog_api.get_v3_catalog(
+                user_id, project_id)
+            # TODO(ayoung): Enforce Endpoints for trust
+            token_data['catalog'] = service_catalog
+
+    def _populate_service_providers(self, token_data):
+        if 'service_providers' in token_data:
+            return
+
+        service_providers = self.federation_api.get_enabled_service_providers()
+        if service_providers:
+            token_data['service_providers'] = service_providers
+
+    def _populate_token_dates(self, token_data, expires=None, trust=None,
+                              issued_at=None):
+        if not expires:
+            expires = provider.default_expire_time()
+        if not isinstance(expires, six.string_types):
+            expires = timeutils.isotime(expires, subsecond=True)
+        token_data['expires_at'] = expires
+        token_data['issued_at'] = (issued_at or
+                                   timeutils.isotime(subsecond=True))
+
+    def _populate_audit_info(self, token_data, audit_info=None):
+        if audit_info is None or isinstance(audit_info, six.string_types):
+            token_data['audit_ids'] = provider.audit_info(audit_info)
+        elif isinstance(audit_info, list):
+            token_data['audit_ids'] = audit_info
+        else:
+            msg = (_('Invalid audit info data type: %(data)s (%(type)s)') %
+                   {'data': audit_info, 'type': type(audit_info)})
+            LOG.error(msg)
+            raise exception.UnexpectedError(msg)
+
+    def get_token_data(self, user_id, method_names, extras=None,
+                       domain_id=None, project_id=None, expires=None,
+                       trust=None, token=None, include_catalog=True,
+                       bind=None, access_token=None, issued_at=None,
+                       audit_info=None):
+        if extras is None:
+            extras = {}
+        if extras:
+            versionutils.deprecated(
+                what='passing token data with "extras"',
+                as_of=versionutils.deprecated.KILO,
+                in_favor_of='well-defined APIs')
+        token_data = {'methods': method_names,
+                      'extras': extras}
+
+        # We've probably already written these to the token
+        if token:
+            for x in ('roles', 'user', 'catalog', 'project', 'domain'):
+                if x in token:
+                    token_data[x] = token[x]
+
+        if CONF.trust.enabled and trust:
+            if user_id != trust['trustee_user_id']:
+                raise exception.Forbidden(_('User is not a trustee.'))
+
+        if bind:
+            token_data['bind'] = bind
+
+        self._populate_scope(token_data, domain_id, project_id)
+        self._populate_user(token_data, user_id, trust)
+        self._populate_roles(token_data, user_id, domain_id, project_id, trust,
+                             access_token)
+        self._populate_audit_info(token_data, audit_info)
+
+        if include_catalog:
+            self._populate_service_catalog(token_data, user_id, domain_id,
+                                           project_id, trust)
+        self._populate_service_providers(token_data)
+        self._populate_token_dates(token_data, expires=expires, trust=trust,
+                                   issued_at=issued_at)
+        self._populate_oauth_section(token_data, access_token)
+        return {'token': token_data}
+
+
+@dependency.requires('catalog_api', 'identity_api', 'oauth_api',
+                     'resource_api', 'role_api', 'trust_api')
+class BaseProvider(provider.Provider):
+    def __init__(self, *args, **kwargs):
+        super(BaseProvider, self).__init__(*args, **kwargs)
+        self.v3_token_data_helper = V3TokenDataHelper()
+        self.v2_token_data_helper = V2TokenDataHelper()
+
+    def get_token_version(self, token_data):
+        if token_data and isinstance(token_data, dict):
+            if 'token_version' in token_data:
+                if token_data['token_version'] in token.provider.VERSIONS:
+                    return token_data['token_version']
+            # FIXME(morganfainberg): deprecate the following logic in future
+            # revisions. It is better to just specify the token_version in
+            # the token_data itself. This way we can support future versions
+            # that might have the same fields.
+            if 'access' in token_data:
+                return token.provider.V2
+            if 'token' in token_data and 'methods' in token_data['token']:
+                return token.provider.V3
+        raise exception.UnsupportedTokenVersionException()
+
+    def issue_v2_token(self, token_ref, roles_ref=None,
+                       catalog_ref=None):
+        metadata_ref = token_ref['metadata']
+        trust_ref = None
+        if CONF.trust.enabled and metadata_ref and 'trust_id' in metadata_ref:
+            trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
+
+        token_data = self.v2_token_data_helper.format_token(
+            token_ref, roles_ref, catalog_ref, trust_ref)
+        token_id = self._get_token_id(token_data)
+        token_data['access']['token']['id'] = token_id
+        return token_id, token_data
+
+    def _is_mapped_token(self, auth_context):
+        return (federation.IDENTITY_PROVIDER in auth_context and
+                federation.PROTOCOL in auth_context)
+
+    def issue_v3_token(self, user_id, method_names, expires_at=None,
+                       project_id=None, domain_id=None, auth_context=None,
+                       trust=None, metadata_ref=None, include_catalog=True,
+                       parent_audit_id=None):
+        # for V2, trust is stashed in metadata_ref
+        if (CONF.trust.enabled and not trust and metadata_ref and
+                'trust_id' in metadata_ref):
+            trust = self.trust_api.get_trust(metadata_ref['trust_id'])
+
+        token_ref = None
+        if auth_context and self._is_mapped_token(auth_context):
+            token_ref = self._handle_mapped_tokens(
+                auth_context, project_id, domain_id)
+
+        access_token = None
+        if 'oauth1' in method_names:
+            access_token_id = auth_context['access_token_id']
+            access_token = self.oauth_api.get_access_token(access_token_id)
+
+        token_data = self.v3_token_data_helper.get_token_data(
+            user_id,
+            method_names,
+            auth_context.get('extras') if auth_context else None,
+            domain_id=domain_id,
+            project_id=project_id,
+            expires=expires_at,
+            trust=trust,
+            bind=auth_context.get('bind') if auth_context else None,
+            token=token_ref,
+            include_catalog=include_catalog,
+            access_token=access_token,
+            audit_info=parent_audit_id)
+
+        token_id = self._get_token_id(token_data)
+        return token_id, token_data
+
+    def _handle_mapped_tokens(self, auth_context, project_id, domain_id):
+        def get_federated_domain():
+            return (CONF.federation.federated_domain_name or
+                    federation.FEDERATED_DOMAIN_KEYWORD)
+
+        federated_domain = get_federated_domain()
+        user_id = auth_context['user_id']
+        group_ids = auth_context['group_ids']
+        idp = auth_context[federation.IDENTITY_PROVIDER]
+        protocol = auth_context[federation.PROTOCOL]
+        token_data = {
+            'user': {
+                'id': user_id,
+                'name': parse.unquote(user_id),
+                federation.FEDERATION: {
+                    'identity_provider': {'id': idp},
+                    'protocol': {'id': protocol}
+                },
+                'domain': {
+                    'id': federated_domain,
+                    'name': federated_domain
+                }
+            }
+        }
+
+        if project_id or domain_id:
+            roles = self.v3_token_data_helper._populate_roles_for_groups(
+                group_ids, project_id, domain_id, user_id)
+            token_data.update({'roles': roles})
+        else:
+            token_data['user'][federation.FEDERATION].update({
+                'groups': [{'id': x} for x in group_ids]
+            })
+        return token_data
+
+    def _verify_token_ref(self, token_ref):
+        """Verify and return the given token_ref."""
+        if not token_ref:
+            raise exception.Unauthorized()
+        return token_ref
+
+    def _assert_is_not_federation_token(self, token_ref):
+        """Make sure we aren't using v2 auth on a federation token."""
+        token_data = token_ref.get('token_data')
+        if (token_data and self.get_token_version(token_data) ==
+                token.provider.V3):
+            if 'OS-FEDERATION' in token_data['token']['user']:
+                msg = _('Attempting to use OS-FEDERATION token with V2 '
+                        'Identity Service, use V3 Authentication')
+                raise exception.Unauthorized(msg)
+
+    def _assert_default_domain(self, token_ref):
+        """Make sure we are operating on default domain only."""
+        if (token_ref.get('token_data') and
+                self.get_token_version(token_ref.get('token_data')) ==
+                token.provider.V3):
+            # this is a V3 token
+            msg = _('Non-default domain is not supported')
+            # user in a non-default domain is prohibited
+            if (token_ref['token_data']['token']['user']['domain']['id'] !=
+                    CONF.identity.default_domain_id):
+                raise exception.Unauthorized(msg)
+            # domain scoping is prohibited
+            if token_ref['token_data']['token'].get('domain'):
+                raise exception.Unauthorized(
+                    _('Domain scoped token is not supported'))
+            # project in non-default domain is prohibited
+            if token_ref['token_data']['token'].get('project'):
+                project = token_ref['token_data']['token']['project']
+                project_domain_id = project['domain']['id']
+                # scoped to project in non-default domain is prohibited
+                if project_domain_id != CONF.identity.default_domain_id:
+                    raise exception.Unauthorized(msg)
+            # if token is scoped to trust, both trustor and trustee must
+            # be in the default domain. Furthermore, the delegated project
+            # must also be in the default domain
+            metadata_ref = token_ref['metadata']
+            if CONF.trust.enabled and 'trust_id' in metadata_ref:
+                trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
+                trustee_user_ref = self.identity_api.get_user(
+                    trust_ref['trustee_user_id'])
+                if (trustee_user_ref['domain_id'] !=
+                        CONF.identity.default_domain_id):
+                    raise exception.Unauthorized(msg)
+                trustor_user_ref = self.identity_api.get_user(
+                    trust_ref['trustor_user_id'])
+                if (trustor_user_ref['domain_id'] !=
+                        CONF.identity.default_domain_id):
+                    raise exception.Unauthorized(msg)
+                project_ref = self.resource_api.get_project(
+                    trust_ref['project_id'])
+                if (project_ref['domain_id'] !=
+                        CONF.identity.default_domain_id):
+                    raise exception.Unauthorized(msg)
+
+    def validate_v2_token(self, token_ref):
+        try:
+            self._assert_is_not_federation_token(token_ref)
+            self._assert_default_domain(token_ref)
+            # FIXME(gyee): performance or correctness? Should we return the
+            # cached token or reconstruct it? Obviously if we are going with
+            # the cached token, any role, project, or domain name changes
+            # will not be reflected. One may argue that with PKI tokens,
+            # we are essentially doing cached token validation anyway.
+            # Let's go with the cached token strategy. Since the token
+            # management layer is now pluggable, one can always provide
+            # their own implementation to suit their needs.
+            token_data = token_ref.get('token_data')
+            if (not token_data or
+                    self.get_token_version(token_data) !=
+                    token.provider.V2):
+                # token is created by old v2 logic
+                metadata_ref = token_ref['metadata']
+                roles_ref = []
+                for role_id in metadata_ref.get('roles', []):
+                    roles_ref.append(self.role_api.get_role(role_id))
+
+                # Get a service catalog if possible
+                # This is needed for on-behalf-of requests
+                catalog_ref = None
+                if token_ref.get('tenant'):
+                    catalog_ref = self.catalog_api.get_catalog(
+                        token_ref['user']['id'],
+                        token_ref['tenant']['id'])
+
+                trust_ref = None
+                if CONF.trust.enabled and 'trust_id' in metadata_ref:
+                    trust_ref = self.trust_api.get_trust(
+                        metadata_ref['trust_id'])
+
+                token_data = self.v2_token_data_helper.format_token(
+                    token_ref, roles_ref, catalog_ref, trust_ref)
+
+            trust_id = token_data['access'].get('trust', {}).get('id')
+            if trust_id:
+                # token trust validation
+                self.trust_api.get_trust(trust_id)
+
+            return token_data
+        except exception.ValidationError as e:
+            LOG.exception(_LE('Failed to validate token'))
+            raise exception.TokenNotFound(e)
+
+    def validate_v3_token(self, token_ref):
+        # FIXME(gyee): performance or correctness? Should we return the
+        # cached token or reconstruct it? Obviously if we are going with
+        # the cached token, any role, project, or domain name changes
+        # will not be reflected. One may argue that with PKI tokens,
+        # we are essentially doing cached token validation anyway.
+        # Let's go with the cached token strategy. Since the token
+        # management layer is now pluggable, one can always provide
+        # their own implementation to suit their needs.
+
+        trust_id = token_ref.get('trust_id')
+        if trust_id:
+            # token trust validation
+            self.trust_api.get_trust(trust_id)
+
+        token_data = token_ref.get('token_data')
+        if not token_data or 'token' not in token_data:
+            # token ref is created by V2 API
+            project_id = None
+            project_ref = token_ref.get('tenant')
+            if project_ref:
+                project_id = project_ref['id']
+
+            issued_at = token_ref['token_data']['access']['token']['issued_at']
+            audit = token_ref['token_data']['access']['token'].get('audit_ids')
+
+            token_data = self.v3_token_data_helper.get_token_data(
+                token_ref['user']['id'],
+                ['password', 'token'],
+                project_id=project_id,
+                bind=token_ref.get('bind'),
+                expires=token_ref['expires'],
+                issued_at=issued_at,
+                audit_info=audit)
+        return token_data
diff --git a/keystone-moon/keystone/token/providers/fernet/__init__.py b/keystone-moon/keystone/token/providers/fernet/__init__.py
new file mode 100644 (file)
index 0000000..953ef62
--- /dev/null
@@ -0,0 +1,13 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.token.providers.fernet.core import *  # noqa
diff --git a/keystone-moon/keystone/token/providers/fernet/core.py b/keystone-moon/keystone/token/providers/fernet/core.py
new file mode 100644 (file)
index 0000000..b1da263
--- /dev/null
@@ -0,0 +1,267 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.common import dependency
+from keystone.contrib import federation
+from keystone import exception
+from keystone.i18n import _
+from keystone.token import provider
+from keystone.token.providers import common
+from keystone.token.providers.fernet import token_formatters as tf
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('trust_api')
+class Provider(common.BaseProvider):
+    def __init__(self, *args, **kwargs):
+        super(Provider, self).__init__(*args, **kwargs)
+
+        self.token_formatter = tf.TokenFormatter()
+
+    def needs_persistence(self):
+        """Should the token be written to a backend."""
+        return False
+
+    def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
+        """Issue a V2 formatted token.
+
+        :param token_ref: reference describing the token
+        :param roles_ref: reference describing the roles for the token
+        :param catalog_ref: reference describing the token's catalog
+        :returns: tuple containing the ID of the token and the token data
+
+        """
+        # TODO(lbragstad): Currently, Fernet tokens don't support bind in the
+        # token format. Raise a 501 if we're dealing with bind.
+        if token_ref.get('bind'):
+            raise exception.NotImplemented()
+
+        user_id = token_ref['user']['id']
+        # Default to password since methods are not provided by token_ref
+        method_names = ['password']
+        project_id = None
+        # Verify that tenant is not None in token_ref
+        if token_ref.get('tenant'):
+            project_id = token_ref['tenant']['id']
+
+        parent_audit_id = token_ref.get('parent_audit_id')
+        # If parent_audit_id is defined, the token was issued via token
+        # authentication
+        if parent_audit_id:
+            method_names.append('token')
+
+        audit_ids = provider.audit_info(parent_audit_id)
+
+        # Get v3 token data and exclude building v3 specific catalog. This is
+        # due to the fact that the V2TokenDataHelper.format_token() method
+        # doesn't build any of the token_reference from other Keystone APIs.
+        # Instead, it builds it from what is persisted in the token reference.
+        # Here we are going to leverage the V3TokenDataHelper.get_token_data()
+        # method written for V3 because it goes through and populates the token
+        # reference dynamically. Once we have a V3 token reference, we can
+        # attempt to convert it to a V2 token response.
+        v3_token_data = self.v3_token_data_helper.get_token_data(
+            user_id,
+            method_names,
+            project_id=project_id,
+            token=token_ref,
+            include_catalog=False,
+            audit_info=audit_ids)
+
+        expires_at = v3_token_data['token']['expires_at']
+        token_id = self.token_formatter.create_token(user_id, expires_at,
+                                                     audit_ids,
+                                                     methods=method_names,
+                                                     project_id=project_id)
+        # Convert v3 to v2 token data and build v2 catalog
+        token_data = self.v2_token_data_helper.v3_to_v2_token(token_id,
+                                                              v3_token_data)
+
+        return token_id, token_data
+
+    def _build_federated_info(self, token_data):
+        """Extract everything needed for federated tokens.
+
+        This dictionary is passed to the FederatedPayload token formatter,
+        which unpacks the values and builds the Fernet token.
+
+        """
+        group_ids = token_data.get('user', {}).get(
+            federation.FEDERATION, {}).get('groups')
+        idp_id = token_data.get('user', {}).get(
+            federation.FEDERATION, {}).get('identity_provider', {}).get('id')
+        protocol_id = token_data.get('user', {}).get(
+            federation.FEDERATION, {}).get('protocol', {}).get('id')
+        if not group_ids:
+            group_ids = list()
+        federated_dict = dict(group_ids=group_ids, idp_id=idp_id,
+                              protocol_id=protocol_id)
+        return federated_dict
+
+    def _rebuild_federated_info(self, federated_dict, user_id):
+        """Format federated information into the token reference.
+
+        The federated_dict is passed back from the FederatedPayload token
+        formatter. The responsibility of this method is to format the
+        information passed back from the token formatter into the token
+        reference before constructing the token data from the
+        V3TokenDataHelper.
+
+        """
+        g_ids = federated_dict['group_ids']
+        idp_id = federated_dict['idp_id']
+        protocol_id = federated_dict['protocol_id']
+        federated_info = dict(groups=g_ids,
+                              identity_provider=dict(id=idp_id),
+                              protocol=dict(id=protocol_id))
+        token_dict = {'user': {federation.FEDERATION: federated_info}}
+        token_dict['user']['id'] = user_id
+        token_dict['user']['name'] = user_id
+        return token_dict
+
+    def issue_v3_token(self, user_id, method_names, expires_at=None,
+                       project_id=None, domain_id=None, auth_context=None,
+                       trust=None, metadata_ref=None, include_catalog=True,
+                       parent_audit_id=None):
+        """Issue a V3 formatted token.
+
+        Here is where we need to detect what is given to us, and what kind of
+        token the user is expecting. Depending on the outcome of that, we can
+        pass all the information to be packed to the proper token format
+        handler.
+
+        :param user_id: ID of the user
+        :param method_names: method of authentication
+        :param expires_at: token expiration time
+        :param project_id: ID of the project being scoped to
+        :param domain_id: ID of the domain being scoped to
+        :param auth_context: authentication context
+        :param trust: ID of the trust
+        :param metadata_ref: metadata reference
+        :param include_catalog: return the catalog in the response if True,
+                                otherwise don't return the catalog
+        :param parent_audit_id: ID of the parent audit entity
+        :returns: tuple containing the id of the token and the token data
+
+        """
+        # TODO(lbragstad): Currently, Fernet tokens don't support bind in the
+        # token format. Raise a 501 if we're dealing with bind.
+        if auth_context and auth_context.get('bind'):
+            raise exception.NotImplemented()
+
+        token_ref = None
+        # NOTE(lbragstad): This determines if we are dealing with a federated
+        # token or not. The groups for the user will be in the returned token
+        # reference.
+        federated_dict = None
+        if auth_context and self._is_mapped_token(auth_context):
+            token_ref = self._handle_mapped_tokens(
+                auth_context, project_id, domain_id)
+            federated_dict = self._build_federated_info(token_ref)
+
+        token_data = self.v3_token_data_helper.get_token_data(
+            user_id,
+            method_names,
+            auth_context.get('extras') if auth_context else None,
+            domain_id=domain_id,
+            project_id=project_id,
+            expires=expires_at,
+            trust=trust,
+            bind=auth_context.get('bind') if auth_context else None,
+            token=token_ref,
+            include_catalog=include_catalog,
+            audit_info=parent_audit_id)
+
+        token = self.token_formatter.create_token(
+            user_id,
+            token_data['token']['expires_at'],
+            token_data['token']['audit_ids'],
+            methods=method_names,
+            domain_id=domain_id,
+            project_id=project_id,
+            trust_id=token_data['token'].get('OS-TRUST:trust', {}).get('id'),
+            federated_info=federated_dict)
+        return token, token_data
+
+    def validate_v2_token(self, token_ref):
+        """Validate a V2 formatted token.
+
+        :param token_ref: reference describing the token to validate
+        :returns: the token data
+        :raises keystone.exception.Unauthorized: if v3 token is used
+
+        """
+        (user_id, methods,
+         audit_ids, domain_id,
+         project_id, trust_id,
+         federated_info, created_at,
+         expires_at) = self.token_formatter.validate_token(token_ref)
+
+        if trust_id or domain_id or federated_info:
+            msg = _('This is not a v2.0 Fernet token. Use v3 for trust, '
+                    'domain, or federated tokens.')
+            raise exception.Unauthorized(msg)
+
+        v3_token_data = self.v3_token_data_helper.get_token_data(
+            user_id,
+            methods,
+            project_id=project_id,
+            expires=expires_at,
+            issued_at=created_at,
+            token=token_ref,
+            include_catalog=False,
+            audit_info=audit_ids)
+        return self.v2_token_data_helper.v3_to_v2_token(token_ref,
+                                                        v3_token_data)
+
+    def validate_v3_token(self, token):
+        """Validate a V3 formatted token.
+
+        :param token: a string describing the token to validate
+        :returns: the token data
+        :raises keystone.exception.Unauthorized: if token format version isn't
+                                                 supported
+
+        """
+        (user_id, methods, audit_ids, domain_id, project_id, trust_id,
+            federated_info, created_at, expires_at) = (
+                self.token_formatter.validate_token(token))
+
+        token_dict = None
+        if federated_info:
+            token_dict = self._rebuild_federated_info(federated_info, user_id)
+        trust_ref = None
+        if trust_id:
+            trust_ref = self.trust_api.get_trust(trust_id)
+
+        return self.v3_token_data_helper.get_token_data(
+            user_id,
+            method_names=methods,
+            domain_id=domain_id,
+            project_id=project_id,
+            issued_at=created_at,
+            expires=expires_at,
+            trust=trust_ref,
+            token=token_dict,
+            audit_info=audit_ids)
+
+    def _get_token_id(self, token_data):
+        """Generate the token_id based upon the data in token_data.
+
+        :param token_data: token information
+        :type token_data: dict
+        :raises keystone.exception.NotImplemented: when called
+        """
+        raise exception.NotImplemented()
diff --git a/keystone-moon/keystone/token/providers/fernet/token_formatters.py b/keystone-moon/keystone/token/providers/fernet/token_formatters.py
new file mode 100644 (file)
index 0000000..5096092
--- /dev/null
@@ -0,0 +1,545 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import datetime
+import struct
+import uuid
+
+from cryptography import fernet
+import msgpack
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+from six.moves import urllib
+
+from keystone.auth import plugins as auth_plugins
+from keystone import exception
+from keystone.i18n import _
+from keystone.token import provider
+from keystone.token.providers.fernet import utils
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+# Fernet byte indexes as computed by pypi/keyless_fernet and defined in
+# https://github.com/fernet/spec
+TIMESTAMP_START = 1
+TIMESTAMP_END = 9
+
+
+class TokenFormatter(object):
+    """Packs and unpacks payloads into tokens for transport."""
+
+    @property
+    def crypto(self):
+        """Return a cryptography instance.
+
+        You can extend this class with a custom crypto @property to provide
+        your own token encoding / decoding. For example, using a different
+        cryptography library (e.g. ``python-keyczar``) or to meet arbitrary
+        security requirements.
+
+        This @property just needs to return an object that implements
+        ``encrypt(plaintext)`` and ``decrypt(ciphertext)``.
+
+        """
+        keys = utils.load_keys()
+
+        if not keys:
+            raise exception.KeysNotFound()
+
+        fernet_instances = [fernet.Fernet(key) for key in keys]
+        return fernet.MultiFernet(fernet_instances)
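+
+    # A minimal sketch of the extension point described in the docstring
+    # above (``MyCrypto`` is hypothetical; any object exposing
+    # encrypt(plaintext) and decrypt(ciphertext) works):
+    #
+    #     class CustomTokenFormatter(TokenFormatter):
+    #         @property
+    #         def crypto(self):
+    #             return MyCrypto()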
+
+    def pack(self, payload):
+        """Pack a payload for transport as a token."""
+        # base64 padding (if any) is not URL-safe
+        return urllib.parse.quote(self.crypto.encrypt(payload))
+
+    def unpack(self, token):
+        """Unpack a token, and validate the payload."""
+        token = urllib.parse.unquote(six.binary_type(token))
+
+        try:
+            return self.crypto.decrypt(token)
+        except fernet.InvalidToken as e:
+            raise exception.Unauthorized(six.text_type(e))
+
+    @classmethod
+    def creation_time(cls, fernet_token):
+        """Returns the creation time of a valid Fernet token."""
+        # tokens may be transmitted as Unicode, but they're just ASCII
+        # (pypi/cryptography will refuse to operate on Unicode input)
+        fernet_token = six.binary_type(fernet_token)
+
+        # the base64 padding on fernet tokens is made URL-safe
+        fernet_token = urllib.parse.unquote(fernet_token)
+
+        # fernet tokens are base64 encoded and the padding made URL-safe
+        token_bytes = base64.urlsafe_b64decode(fernet_token)
+
+        # slice into the byte array to get just the timestamp
+        timestamp_bytes = token_bytes[TIMESTAMP_START:TIMESTAMP_END]
+
+        # convert those bytes to an integer
+        # (it's a 64-bit "unsigned long long int" in C)
+        timestamp_int = struct.unpack(">Q", timestamp_bytes)[0]
+
+        # and with an integer, it's trivial to produce a datetime object
+        created_at = datetime.datetime.utcfromtimestamp(timestamp_int)
+
+        return created_at
+
+    def create_token(self, user_id, expires_at, audit_ids, methods=None,
+                     domain_id=None, project_id=None, trust_id=None,
+                     federated_info=None):
+        """Given a set of payload attributes, generate a Fernet token."""
+        if trust_id:
+            version = TrustScopedPayload.version
+            payload = TrustScopedPayload.assemble(
+                user_id,
+                methods,
+                project_id,
+                expires_at,
+                audit_ids,
+                trust_id)
+        elif federated_info:
+            version = FederatedPayload.version
+            payload = FederatedPayload.assemble(
+                user_id,
+                methods,
+                expires_at,
+                audit_ids,
+                federated_info)
+        elif project_id:
+            version = ProjectScopedPayload.version
+            payload = ProjectScopedPayload.assemble(
+                user_id,
+                methods,
+                project_id,
+                expires_at,
+                audit_ids)
+        elif domain_id:
+            version = DomainScopedPayload.version
+            payload = DomainScopedPayload.assemble(
+                user_id,
+                methods,
+                domain_id,
+                expires_at,
+                audit_ids)
+        else:
+            version = UnscopedPayload.version
+            payload = UnscopedPayload.assemble(
+                user_id,
+                methods,
+                expires_at,
+                audit_ids)
+
+        versioned_payload = (version,) + payload
+        serialized_payload = msgpack.packb(versioned_payload)
+        token = self.pack(serialized_payload)
+
+        return token
+
+    def validate_token(self, token):
+        """Validates a Fernet token and returns the payload attributes."""
+        # Convert v2 unicode token to a string
+        if not isinstance(token, six.binary_type):
+            token = token.encode('ascii')
+
+        serialized_payload = self.unpack(token)
+        versioned_payload = msgpack.unpackb(serialized_payload)
+        version, payload = versioned_payload[0], versioned_payload[1:]
+
+        # depending on the formatter, these may or may not be defined
+        domain_id = None
+        project_id = None
+        trust_id = None
+        federated_info = None
+
+        if version == UnscopedPayload.version:
+            (user_id, methods, expires_at, audit_ids) = (
+                UnscopedPayload.disassemble(payload))
+        elif version == DomainScopedPayload.version:
+            (user_id, methods, domain_id, expires_at, audit_ids) = (
+                DomainScopedPayload.disassemble(payload))
+        elif version == ProjectScopedPayload.version:
+            (user_id, methods, project_id, expires_at, audit_ids) = (
+                ProjectScopedPayload.disassemble(payload))
+        elif version == TrustScopedPayload.version:
+            (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
+                TrustScopedPayload.disassemble(payload))
+        elif version == FederatedPayload.version:
+            (user_id, methods, expires_at, audit_ids, federated_info) = (
+                FederatedPayload.disassemble(payload))
+        else:
+            # If the token_format is not recognized, raise Unauthorized.
+            raise exception.Unauthorized(_(
+                'This is not a recognized Fernet payload version: %s') %
+                version)
+
+        # rather than appearing in the payload, the creation time is encoded
+        # into the token format itself
+        created_at = TokenFormatter.creation_time(token)
+        created_at = timeutils.isotime(at=created_at, subsecond=True)
+        expires_at = timeutils.parse_isotime(expires_at)
+        expires_at = timeutils.isotime(at=expires_at, subsecond=True)
+
+        return (user_id, methods, audit_ids, domain_id, project_id, trust_id,
+                federated_info, created_at, expires_at)
+
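+# Illustrative usage of TokenFormatter (all values are examples):
+#   formatter = TokenFormatter()
+#   token = formatter.create_token(user_id, expires_at, audit_ids,
+#                                  methods=['password'],
+#                                  project_id=project_id)
+#   (user_id, methods, audit_ids, domain_id, project_id, trust_id,
+#    federated_info, created_at, expires_at) = formatter.validate_token(token)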
+
+class BasePayload(object):
+    # each payload variant should have a unique version
+    version = None
+
+    @classmethod
+    def assemble(cls, *args):
+        """Assemble the payload of a token.
+
+        :param args: whatever data should go into the payload
+        :returns: the payload of a token
+
+        """
+        raise NotImplementedError()
+
+    @classmethod
+    def disassemble(cls, payload):
+        """Disassemble an unscoped payload into the component data.
+
+        :param payload: this variant of payload
+        :returns: a tuple of the payloads component data
+
+        """
+        raise NotImplementedError()
+
+    @classmethod
+    def convert_uuid_hex_to_bytes(cls, uuid_string):
+        """Compress UUID formatted strings to bytes.
+
+        :param uuid_string: uuid string to compress to bytes
+        :returns: a byte representation of the uuid
+
+        """
+        # TODO(lbragstad): Wrap this in an exception. It's unclear when we
+        # couldn't handle what we've been given, but guard against the case
+        # where the integrity of the token has been compromised.
+        uuid_obj = uuid.UUID(uuid_string)
+        return uuid_obj.bytes
+
+    @classmethod
+    def convert_uuid_bytes_to_hex(cls, uuid_byte_string):
+        """Generate uuid.hex format based on byte string.
+
+        :param uuid_byte_string: uuid string to generate from
+        :returns: uuid hex formatted string
+
+        """
+        # TODO(lbragstad): Wrap this in an exception. It's unclear when we
+        # couldn't handle what we've been given, but guard against the case
+        # where the integrity of the token has been compromised.
+        uuid_obj = uuid.UUID(bytes=uuid_byte_string)
+        return uuid_obj.hex
+
+    @classmethod
+    def _convert_time_string_to_int(cls, time_string):
+        """Convert a time formatted string to a timestamp integer.
+
+        :param time_string: time formatted string
+        :returns: an integer timestamp
+
+        """
+        time_object = timeutils.parse_isotime(time_string)
+        return (timeutils.normalize_time(time_object) -
+                datetime.datetime.utcfromtimestamp(0)).total_seconds()
+
+    @classmethod
+    def _convert_int_to_time_string(cls, time_int):
+        """Convert a timestamp integer to a string.
+
+        :param time_int: integer representing timestamp
+        :returns: a time formatted string
+
+        """
+        time_object = datetime.datetime.utcfromtimestamp(int(time_int))
+        return timeutils.isotime(time_object)
+
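+    # Worked example (illustrative): '2015-06-30T16:47:29.000000Z' converts
+    # to 1435682849.0, and converting back yields '2015-06-30T16:47:29Z'
+    # (fractional seconds are dropped on the return trip).
+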
+    @classmethod
+    def attempt_convert_uuid_hex_to_bytes(cls, value):
+        """Attempt to convert value to bytes or return value.
+
+        :param value: value to attempt to convert to bytes
+        :returns: uuid value in bytes or value
+
+        """
+        try:
+            return cls.convert_uuid_hex_to_bytes(value)
+        except ValueError:
+        # this might not be a UUID, depending on the situation (e.g.
+        # federation)
+            return value
+
+    @classmethod
+    def attempt_convert_uuid_bytes_to_hex(cls, value):
+        """Attempt to convert value to hex or return value.
+
+        :param value: value to attempt to convert to hex
+        :returns: uuid value in hex or value
+
+        """
+        try:
+            return cls.convert_uuid_bytes_to_hex(value)
+        except ValueError:
+            return value
+
+
+class UnscopedPayload(BasePayload):
+    version = 0
+
+    @classmethod
+    def assemble(cls, user_id, methods, expires_at, audit_ids):
+        """Assemble the payload of an unscoped token.
+
+        :param user_id: identifier of the user in the token request
+        :param methods: list of authentication methods used
+        :param expires_at: datetime of the token's expiration
+        :param audit_ids: list of the token's audit IDs
+        :returns: the payload of an unscoped token
+
+        """
+        b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+        methods = auth_plugins.convert_method_list_to_integer(methods)
+        expires_at_int = cls._convert_time_string_to_int(expires_at)
+        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+                           audit_ids))
+        return (b_user_id, methods, expires_at_int, b_audit_ids)
+
+    @classmethod
+    def disassemble(cls, payload):
+        """Disassemble an unscoped payload into the component data.
+
+        :param payload: the payload of an unscoped token
+        :return: a tuple containing the user_id, auth methods, expires_at, and
+                 audit_ids
+
+        """
+        user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+        methods = auth_plugins.convert_integer_to_method_list(payload[1])
+        expires_at_str = cls._convert_int_to_time_string(payload[2])
+        audit_ids = list(map(provider.base64_encode, payload[3]))
+        return (user_id, methods, expires_at_str, audit_ids)
+
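+    # Illustrative round trip (example values only): assemble() compresses a
+    # UUID-hex user_id, a method list, an ISO 8601 expires_at, and audit IDs
+    # into a compact tuple; disassemble() recovers the original values,
+    # except that expires_at loses subsecond precision.
+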
+
+class DomainScopedPayload(BasePayload):
+    version = 1
+
+    @classmethod
+    def assemble(cls, user_id, methods, domain_id, expires_at, audit_ids):
+        """Assemble the payload of a domain-scoped token.
+
+        :param user_id: ID of the user in the token request
+        :param methods: list of authentication methods used
+        :param domain_id: ID of the domain to scope to
+        :param expires_at: datetime of the token's expiration
+        :param audit_ids: list of the token's audit IDs
+        :returns: the payload of a domain-scoped token
+
+        """
+        b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+        methods = auth_plugins.convert_method_list_to_integer(methods)
+        try:
+            b_domain_id = cls.convert_uuid_hex_to_bytes(domain_id)
+        except ValueError:
+            # the default domain ID is configurable, and probably isn't a UUID
+            if domain_id == CONF.identity.default_domain_id:
+                b_domain_id = domain_id
+            else:
+                raise
+        expires_at_int = cls._convert_time_string_to_int(expires_at)
+        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+                           audit_ids))
+        return (b_user_id, methods, b_domain_id, expires_at_int, b_audit_ids)
+
+    @classmethod
+    def disassemble(cls, payload):
+        """Disassemble a payload into the component data.
+
+        :param payload: the payload of a token
+        :return: a tuple containing the user_id, auth methods, domain_id,
+                 expires_at_str, and audit_ids
+
+        """
+        user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+        methods = auth_plugins.convert_integer_to_method_list(payload[1])
+        try:
+            domain_id = cls.convert_uuid_bytes_to_hex(payload[2])
+        except ValueError:
+            # the default domain ID is configurable, and probably isn't a UUID
+            if payload[2] == CONF.identity.default_domain_id:
+                domain_id = payload[2]
+            else:
+                raise
+        expires_at_str = cls._convert_int_to_time_string(payload[3])
+        audit_ids = list(map(provider.base64_encode, payload[4]))
+
+        return (user_id, methods, domain_id, expires_at_str, audit_ids)
+
+
+class ProjectScopedPayload(BasePayload):
+    version = 2
+
+    @classmethod
+    def assemble(cls, user_id, methods, project_id, expires_at, audit_ids):
+        """Assemble the payload of a project-scoped token.
+
+        :param user_id: ID of the user in the token request
+        :param methods: list of authentication methods used
+        :param project_id: ID of the project to scope to
+        :param expires_at: datetime of the token's expiration
+        :param audit_ids: list of the token's audit IDs
+        :returns: the payload of a project-scoped token
+
+        """
+        b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+        methods = auth_plugins.convert_method_list_to_integer(methods)
+        b_project_id = cls.convert_uuid_hex_to_bytes(project_id)
+        expires_at_int = cls._convert_time_string_to_int(expires_at)
+        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+                           audit_ids))
+        return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids)
+
+    @classmethod
+    def disassemble(cls, payload):
+        """Disassemble a payload into the component data.
+
+        :param payload: the payload of a token
+        :return: a tuple containing the user_id, auth methods, project_id,
+                 expires_at_str, and audit_ids
+
+        """
+        user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+        methods = auth_plugins.convert_integer_to_method_list(payload[1])
+        project_id = cls.convert_uuid_bytes_to_hex(payload[2])
+        expires_at_str = cls._convert_int_to_time_string(payload[3])
+        audit_ids = list(map(provider.base64_encode, payload[4]))
+
+        return (user_id, methods, project_id, expires_at_str, audit_ids)
+
+
+class TrustScopedPayload(BasePayload):
+    version = 3
+
+    @classmethod
+    def assemble(cls, user_id, methods, project_id, expires_at, audit_ids,
+                 trust_id):
+        """Assemble the payload of a trust-scoped token.
+
+        :param user_id: ID of the user in the token request
+        :param methods: list of authentication methods used
+        :param project_id: ID of the project to scope to
+        :param expires_at: datetime of the token's expiration
+        :param audit_ids: list of the token's audit IDs
+        :param trust_id: ID of the trust in effect
+        :returns: the payload of a trust-scoped token
+
+        """
+        b_user_id = cls.convert_uuid_hex_to_bytes(user_id)
+        methods = auth_plugins.convert_method_list_to_integer(methods)
+        b_project_id = cls.convert_uuid_hex_to_bytes(project_id)
+        b_trust_id = cls.convert_uuid_hex_to_bytes(trust_id)
+        expires_at_int = cls._convert_time_string_to_int(expires_at)
+        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+                           audit_ids))
+
+        return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids,
+                b_trust_id)
+
+    @classmethod
+    def disassemble(cls, payload):
+        """Validate a trust-based payload.
+
+        :param token_string: a string representing the token
+        :returns: a tuple containing the user_id, auth methods, project_id,
+                  expires_at_str, audit_ids, and trust_id
+
+        """
+        user_id = cls.convert_uuid_bytes_to_hex(payload[0])
+        methods = auth_plugins.convert_integer_to_method_list(payload[1])
+        project_id = cls.convert_uuid_bytes_to_hex(payload[2])
+        expires_at_str = cls._convert_int_to_time_string(payload[3])
+        audit_ids = list(map(provider.base64_encode, payload[4]))
+        trust_id = cls.convert_uuid_bytes_to_hex(payload[5])
+
+        return (user_id, methods, project_id, expires_at_str, audit_ids,
+                trust_id)
+
+
+class FederatedPayload(BasePayload):
+    version = 4
+
+    @classmethod
+    def assemble(cls, user_id, methods, expires_at, audit_ids, federated_info):
+        """Assemble the payload of a federated token.
+
+        :param user_id: ID of the user in the token request
+        :param methods: list of authentication methods used
+        :param expires_at: datetime of the token's expiration
+        :param audit_ids: list of the token's audit IDs
+        :param federated_info: dictionary containing group IDs, the identity
+                               provider ID, protocol ID, and federated domain
+                               ID
+        :returns: the payload of a federated token
+
+        """
+        def pack_group_ids(group_dict):
+            return cls.convert_uuid_hex_to_bytes(group_dict['id'])
+
+        b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
+        methods = auth_plugins.convert_method_list_to_integer(methods)
+        b_group_ids = list(map(pack_group_ids, federated_info['group_ids']))
+        b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
+            federated_info['idp_id'])
+        protocol_id = federated_info['protocol_id']
+        expires_at_int = cls._convert_time_string_to_int(expires_at)
+        b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+                           audit_ids))
+
+        return (b_user_id, methods, b_group_ids, b_idp_id, protocol_id,
+                expires_at_int, b_audit_ids)
+
+    @classmethod
+    def disassemble(cls, payload):
+        """Validate a federated paylod.
+
+        :param token_string: a string representing the token
+        :return: a tuple containing the user_id, auth methods, audit_ids, and
+                 a dictionary containing federated information such as the the
+                 group IDs, the identity provider ID, the protocol ID, and the
+                 federated domain ID
+
+        """
+        def unpack_group_ids(group_id_in_bytes):
+            group_id = cls.convert_uuid_bytes_to_hex(group_id_in_bytes)
+            return {'id': group_id}
+
+        user_id = cls.attempt_convert_uuid_bytes_to_hex(payload[0])
+        methods = auth_plugins.convert_integer_to_method_list(payload[1])
+        group_ids = list(map(unpack_group_ids, payload[2]))
+        idp_id = cls.attempt_convert_uuid_bytes_to_hex(payload[3])
+        protocol_id = payload[4]
+        expires_at_str = cls._convert_int_to_time_string(payload[5])
+        audit_ids = list(map(provider.base64_encode, payload[6]))
+        federated_info = dict(group_ids=group_ids, idp_id=idp_id,
+                              protocol_id=protocol_id)
+        return (user_id, methods, expires_at_str, audit_ids, federated_info)
diff --git a/keystone-moon/keystone/token/providers/fernet/utils.py b/keystone-moon/keystone/token/providers/fernet/utils.py
new file mode 100644 (file)
index 0000000..56624ee
--- /dev/null
@@ -0,0 +1,243 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import stat
+
+from cryptography import fernet
+from oslo_config import cfg
+from oslo_log import log
+
+from keystone.i18n import _LE, _LW, _LI
+
+
+LOG = log.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+def validate_key_repository():
+    """Validate permissions on the key repository directory."""
+    # NOTE(lbragstad): We shouldn't need to check if the directory was passed
+    # in as None because we don't set allow_no_values to True.
+
+    # ensure current user has full access to the key repository
+    if (not os.access(CONF.fernet_tokens.key_repository, os.R_OK) or not
+            os.access(CONF.fernet_tokens.key_repository, os.W_OK) or not
+            os.access(CONF.fernet_tokens.key_repository, os.X_OK)):
+        LOG.error(
+            _LE('Either [fernet_tokens] key_repository does not exist or '
+                'Keystone does not have sufficient permission to access it: '
+                '%s'), CONF.fernet_tokens.key_repository)
+        return False
+
+    # ensure the key repository isn't world-readable
+    stat_info = os.stat(CONF.fernet_tokens.key_repository)
+    if stat_info.st_mode & stat.S_IROTH or stat_info.st_mode & stat.S_IXOTH:
+        LOG.warning(_LW(
+            '[fernet_tokens] key_repository is world readable: %s'),
+            CONF.fernet_tokens.key_repository)
+
+    return True
+
+
+def _convert_to_integers(id_value):
+    """Cast user and group system identifiers to integers."""
+    # NOTE(lbragstad) os.chown() will raise a TypeError here if
+    # keystone_user_id and keystone_group_id are not integers. Let's
+    # cast them to integers if we can because it's possible to pass non-integer
+    # values into the fernet_setup utility.
+    try:
+        id_int = int(id_value)
+    except ValueError as e:
+        LOG.error('Unable to convert Keystone user or group ID. Error: %s', e)
+        raise
+
+    return id_int
+
+
+def create_key_directory(keystone_user_id=None, keystone_group_id=None):
+    """If the configured key directory does not exist, attempt to create it."""
+    if not os.access(CONF.fernet_tokens.key_repository, os.F_OK):
+        LOG.info(_LI(
+            '[fernet_tokens] key_repository does not appear to exist; '
+            'attempting to create it'))
+
+        try:
+            os.makedirs(CONF.fernet_tokens.key_repository, 0o700)
+        except OSError:
+            LOG.error(_LE(
+                'Failed to create [fernet_tokens] key_repository: either it '
+                'already exists or you don\'t have sufficient permissions to '
+                'create it'))
+
+        if keystone_user_id and keystone_group_id:
+            os.chown(
+                CONF.fernet_tokens.key_repository,
+                keystone_user_id,
+                keystone_group_id)
+        elif keystone_user_id or keystone_group_id:
+            LOG.warning(_LW(
+                'Unable to change the ownership of [fernet_tokens] '
+                'key_repository without a keystone user ID and keystone group '
+                'ID both being provided: %s') %
+                CONF.fernet_tokens.key_repository)
+
+
+def _create_new_key(keystone_user_id, keystone_group_id):
+    """Securely create a new encryption key.
+
+    Create a new key that is readable by the Keystone group and Keystone user.
+    """
+    key = fernet.Fernet.generate_key()
+
+    # This ensures the key created is not world-readable
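+    # (with a umask of 0o177, a file opened with the default mode 0o666 is
+    # created as 0o666 & ~0o177 == 0o600, i.e. owner read/write only)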
+    old_umask = os.umask(0o177)
+    if keystone_user_id and keystone_group_id:
+        old_egid = os.getegid()
+        old_euid = os.geteuid()
+        os.setegid(keystone_group_id)
+        os.seteuid(keystone_user_id)
+    elif keystone_user_id or keystone_group_id:
+        LOG.warning(_LW(
+            'Unable to change the ownership of the new key without a keystone '
+            'user ID and keystone group ID both being provided: %s') %
+            CONF.fernet_tokens.key_repository)
+    # Determine the file name of the new key
+    key_file = os.path.join(CONF.fernet_tokens.key_repository, '0')
+    try:
+        with open(key_file, 'w') as f:
+            f.write(key)
+    finally:
+        # After writing the key, set the umask back to its original value. Do
+        # the same with group and user identifiers if a Keystone group or user
+        # was supplied.
+        os.umask(old_umask)
+        if keystone_user_id and keystone_group_id:
+            os.seteuid(old_euid)
+            os.setegid(old_egid)
+
+    LOG.info(_LI('Created a new key: %s'), key_file)
+
+
+def initialize_key_repository(keystone_user_id=None, keystone_group_id=None):
+    """Create a key repository and bootstrap it with a key.
+
+    :param keystone_user_id: User ID of the Keystone user.
+    :param keystone_group_id: Group ID of the Keystone user.
+
+    """
+    # make sure we have work to do before proceeding
+    if os.access(os.path.join(CONF.fernet_tokens.key_repository, '0'),
+                 os.F_OK):
+        LOG.info(_LI('Key repository is already initialized; aborting.'))
+        return
+
+    # bootstrap an existing key
+    _create_new_key(keystone_user_id, keystone_group_id)
+
+    # ensure that we end up with a primary and secondary key
+    rotate_keys(keystone_user_id, keystone_group_id)
+
+
+def rotate_keys(keystone_user_id=None, keystone_group_id=None):
+    """Create a new primary key and revoke excess active keys.
+
+    :param keystone_user_id: User ID of the Keystone user.
+    :param keystone_group_id: Group ID of the Keystone user.
+
+    Key rotation utilizes the following behaviors:
+
+    - The highest key number is used as the primary key (used for encryption).
+    - All keys can be used for decryption.
+    - New keys are always created as key "0," which serves as a placeholder
+      before promoting it to be the primary key.
+
+    This strategy allows you to safely perform rotation on one node in a
+    cluster, before syncing the results of the rotation to all other nodes
+    (during both key rotation and synchronization, all nodes must recognize all
+    primary keys).
+
+    """
+    # read the list of key files
+    key_files = dict()
+    for filename in os.listdir(CONF.fernet_tokens.key_repository):
+        path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
+        if os.path.isfile(path):
+            key_files[int(filename)] = path
+
+    LOG.info(_LI('Starting key rotation with %(count)s key files: %(list)s'), {
+        'count': len(key_files),
+        'list': key_files.values()})
+
+    # determine the number of the new primary key
+    current_primary_key = max(key_files.keys())
+    LOG.info(_LI('Current primary key is: %s'), current_primary_key)
+    new_primary_key = current_primary_key + 1
+    LOG.info(_LI('Next primary key will be: %s'), new_primary_key)
+
+    # promote the next primary key to be the primary
+    os.rename(
+        os.path.join(CONF.fernet_tokens.key_repository, '0'),
+        os.path.join(CONF.fernet_tokens.key_repository, str(new_primary_key)))
+    key_files.pop(0)
+    key_files[new_primary_key] = os.path.join(
+        CONF.fernet_tokens.key_repository,
+        str(new_primary_key))
+    LOG.info(_LI('Promoted key 0 to be the primary: %s'), new_primary_key)
+
+    # add a new key to the rotation, which will be the *next* primary
+    _create_new_key(keystone_user_id, keystone_group_id)
+
+    # check for bad configuration
+    if CONF.fernet_tokens.max_active_keys < 1:
+        LOG.warning(_LW(
+            '[fernet_tokens] max_active_keys must be at least 1 to maintain a '
+            'primary key.'))
+        CONF.fernet_tokens.max_active_keys = 1
+
+    # purge excess keys
+    keys = sorted(key_files.keys())
+    excess_keys = (
+        keys[:len(key_files) - CONF.fernet_tokens.max_active_keys + 1])
+    LOG.info(_LI('Excess keys to purge: %s'), excess_keys)
+    for i in excess_keys:
+        os.remove(key_files[i])
+
+
+def load_keys():
+    """Load keys from disk into a list.
+
+    The first key in the list is the primary key used for encryption. All
+    other keys are active secondary keys that can be used for decrypting
+    tokens.
+
+    """
+    if not validate_key_repository():
+        return []
+
+    # build a dictionary of key_number:encryption_key pairs
+    keys = dict()
+    for filename in os.listdir(CONF.fernet_tokens.key_repository):
+        path = os.path.join(CONF.fernet_tokens.key_repository, str(filename))
+        if os.path.isfile(path):
+            with open(path, 'r') as key_file:
+                keys[int(filename)] = key_file.read()
+
+    LOG.info(_LI(
+        'Loaded %(count)s encryption keys from: %(dir)s'), {
+            'count': len(keys),
+            'dir': CONF.fernet_tokens.key_repository})
+
+    # return the encryption_keys, sorted by key number, descending
+    return [keys[x] for x in sorted(keys.keys(), reverse=True)]
diff --git a/keystone-moon/keystone/token/providers/pki.py b/keystone-moon/keystone/token/providers/pki.py
new file mode 100644 (file)
index 0000000..61b4281
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone PKI Token Provider"""
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from keystone.common import environment
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _, _LE
+from keystone.token.providers import common
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class Provider(common.BaseProvider):
+    def _get_token_id(self, token_data):
+        try:
+            # force conversion to a string as the keystone client cms code
+            # produces unicode.  This can be removed if the client returns
+            # str()
+            # TODO(ayoung): Make to a byte_str for Python3
+            token_json = jsonutils.dumps(token_data, cls=utils.PKIEncoder)
+            token_id = str(cms.cms_sign_token(token_json,
+                                              CONF.signing.certfile,
+                                              CONF.signing.keyfile))
+            return token_id
+        except environment.subprocess.CalledProcessError:
+            LOG.exception(_LE('Unable to sign token'))
+            raise exception.UnexpectedError(_(
+                'Unable to sign token.'))
+
+    def needs_persistence(self):
+        """Should the token be written to a backend."""
+        return True
diff --git a/keystone-moon/keystone/token/providers/pkiz.py b/keystone-moon/keystone/token/providers/pkiz.py
new file mode 100644 (file)
index 0000000..b6f2944
--- /dev/null
@@ -0,0 +1,51 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone Compressed PKI Token Provider"""
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from keystone.common import environment
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone.token.providers import common
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+ERROR_MESSAGE = _('Unable to sign token.')
+
+
+class Provider(common.BaseProvider):
+    def _get_token_id(self, token_data):
+        try:
+            # force conversion to a string as the keystone client cms code
+            # produces unicode. This can be removed if the client returns
+            # str()
+            # TODO(ayoung): Make to a byte_str for Python3
+            token_json = jsonutils.dumps(token_data, cls=utils.PKIEncoder)
+            token_id = str(cms.pkiz_sign(token_json,
+                                         CONF.signing.certfile,
+                                         CONF.signing.keyfile))
+            return token_id
+        except environment.subprocess.CalledProcessError:
+            LOG.exception(ERROR_MESSAGE)
+            raise exception.UnexpectedError(ERROR_MESSAGE)
+
+    def needs_persistence(self):
+        """Should the token be written to a backend."""
+        return True
diff --git a/keystone-moon/keystone/token/providers/uuid.py b/keystone-moon/keystone/token/providers/uuid.py
new file mode 100644 (file)
index 0000000..15118d8
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Keystone UUID Token Provider"""
+
+from __future__ import absolute_import
+
+import uuid
+
+from keystone.token.providers import common
+
+
+class Provider(common.BaseProvider):
+    def __init__(self, *args, **kwargs):
+        super(Provider, self).__init__(*args, **kwargs)
+
+    def _get_token_id(self, token_data):
+        return uuid.uuid4().hex
+
+    def needs_persistence(self):
+        """Should the token be written to a backend."""
+        return True
diff --git a/keystone-moon/keystone/token/routers.py b/keystone-moon/keystone/token/routers.py
new file mode 100644 (file)
index 0000000..bcd40ee
--- /dev/null
@@ -0,0 +1,59 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from keystone.common import wsgi
+from keystone.token import controllers
+
+
+class Router(wsgi.ComposableRouter):
+    def add_routes(self, mapper):
+        token_controller = controllers.Auth()
+        mapper.connect('/tokens',
+                       controller=token_controller,
+                       action='authenticate',
+                       conditions=dict(method=['POST']))
+        mapper.connect('/tokens/revoked',
+                       controller=token_controller,
+                       action='revocation_list',
+                       conditions=dict(method=['GET']))
+        mapper.connect('/tokens/{token_id}',
+                       controller=token_controller,
+                       action='validate_token',
+                       conditions=dict(method=['GET']))
+        # NOTE(morganfainberg): For policy enforcement reasons, the
+        # ``validate_token_head`` method is still used for HEAD requests.
+        # The controller method makes the same call as the validate_token
+        # call and lets wsgi.render_response remove the body data.
+        mapper.connect('/tokens/{token_id}',
+                       controller=token_controller,
+                       action='validate_token_head',
+                       conditions=dict(method=['HEAD']))
+        mapper.connect('/tokens/{token_id}',
+                       controller=token_controller,
+                       action='delete_token',
+                       conditions=dict(method=['DELETE']))
+        mapper.connect('/tokens/{token_id}/endpoints',
+                       controller=token_controller,
+                       action='endpoints',
+                       conditions=dict(method=['GET']))
+
+        # Certificates used to verify auth tokens
+        mapper.connect('/certificates/ca',
+                       controller=token_controller,
+                       action='ca_cert',
+                       conditions=dict(method=['GET']))
+
+        mapper.connect('/certificates/signing',
+                       controller=token_controller,
+                       action='signing_cert',
+                       conditions=dict(method=['GET']))
diff --git a/keystone-moon/keystone/trust/__init__.py b/keystone-moon/keystone/trust/__init__.py
new file mode 100644 (file)
index 0000000..e5ee61f
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.trust import controllers  # noqa
+from keystone.trust.core import *  # noqa
+from keystone.trust import routers  # noqa
diff --git a/keystone-moon/keystone/trust/backends/__init__.py b/keystone-moon/keystone/trust/backends/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/keystone-moon/keystone/trust/backends/sql.py b/keystone-moon/keystone/trust/backends/sql.py
new file mode 100644 (file)
index 0000000..4f5ee2e
--- /dev/null
@@ -0,0 +1,180 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from oslo_log import log
+from oslo_utils import timeutils
+
+from keystone.common import sql
+from keystone import exception
+from keystone import trust
+
+
+LOG = log.getLogger(__name__)
+# The maximum number of iterations that will be attempted for optimistic
+# locking on consuming a limited-use trust.
+MAXIMUM_CONSUME_ATTEMPTS = 10
+
+
+class TrustModel(sql.ModelBase, sql.DictBase):
+    __tablename__ = 'trust'
+    attributes = ['id', 'trustor_user_id', 'trustee_user_id',
+                  'project_id', 'impersonation', 'expires_at',
+                  'remaining_uses', 'deleted_at']
+    id = sql.Column(sql.String(64), primary_key=True)
+    # user id of owner
+    trustor_user_id = sql.Column(sql.String(64), nullable=False)
+    # user_id of user allowed to consume this preauth
+    trustee_user_id = sql.Column(sql.String(64), nullable=False)
+    project_id = sql.Column(sql.String(64))
+    impersonation = sql.Column(sql.Boolean, nullable=False)
+    deleted_at = sql.Column(sql.DateTime)
+    expires_at = sql.Column(sql.DateTime)
+    remaining_uses = sql.Column(sql.Integer, nullable=True)
+    extra = sql.Column(sql.JsonBlob())
+
+
+class TrustRole(sql.ModelBase):
+    __tablename__ = 'trust_role'
+    attributes = ['trust_id', 'role_id']
+    trust_id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+    role_id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+
+
+class Trust(trust.Driver):
+    @sql.handle_conflicts(conflict_type='trust')
+    def create_trust(self, trust_id, trust, roles):
+        with sql.transaction() as session:
+            ref = TrustModel.from_dict(trust)
+            ref['id'] = trust_id
+            if ref.get('expires_at') and ref['expires_at'].tzinfo is not None:
+                ref['expires_at'] = timeutils.normalize_time(ref['expires_at'])
+            session.add(ref)
+            added_roles = []
+            for role in roles:
+                trust_role = TrustRole()
+                trust_role.trust_id = trust_id
+                trust_role.role_id = role['id']
+                added_roles.append({'id': role['id']})
+                session.add(trust_role)
+        trust_dict = ref.to_dict()
+        trust_dict['roles'] = added_roles
+        return trust_dict
+
+    def _add_roles(self, trust_id, session, trust_dict):
+        roles = []
+        for role in session.query(TrustRole).filter_by(trust_id=trust_id):
+            roles.append({'id': role.role_id})
+        trust_dict['roles'] = roles
+
+    @sql.handle_conflicts(conflict_type='trust')
+    def consume_use(self, trust_id):
+
+        for attempt in range(MAXIMUM_CONSUME_ATTEMPTS):
+            with sql.transaction() as session:
+                try:
+                    query_result = (session.query(TrustModel.remaining_uses).
+                                    filter_by(id=trust_id).
+                                    filter_by(deleted_at=None).one())
+                except sql.NotFound:
+                    raise exception.TrustNotFound(trust_id=trust_id)
+
+                remaining_uses = query_result.remaining_uses
+
+                if remaining_uses is None:
+                    # unlimited uses, do nothing
+                    break
+                elif remaining_uses > 0:
+                    # NOTE(morganfainberg): use an optimistic locking method
+                    # to ensure we only ever update a trust that has the
+                    # expected number of remaining uses.
+                    rows_affected = (
+                        session.query(TrustModel).
+                        filter_by(id=trust_id).
+                        filter_by(deleted_at=None).
+                        filter_by(remaining_uses=remaining_uses).
+                        update({'remaining_uses': (remaining_uses - 1)},
+                               synchronize_session=False))
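+                    # the query above is, in effect (illustrative SQL):
+                    #   UPDATE trust SET remaining_uses = remaining_uses - 1
+                    #   WHERE id = :trust_id AND deleted_at IS NULL
+                    #         AND remaining_uses = :remaining_uses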
+                    if rows_affected == 1:
+                        # Successfully consumed a single limited-use trust.
+                        # Since trust_id is the PK on the Trust table, there is
+                        # no case we should match more than 1 row in the
+                        # update. We either update 1 row or 0 rows.
+                        break
+                else:
+                    raise exception.TrustUseLimitReached(trust_id=trust_id)
+            # NOTE(morganfainberg): Ensure we have a yield point for eventlet
+            # here. This should cost us nothing otherwise. This can be removed
+            # if/when oslo_db cleanly handles yields on db calls.
+            time.sleep(0)
+        else:
+            # NOTE(morganfainberg): If the for loop completes without breaking
+            # out, this else block executes. The trust was neither unlimited
+            # nor consumed (we hit the maximum iteration limit); raising here
+            # indicates we were unable to get the optimistic lock, rather than
+            # silently failing or incorrectly reporting the trust as consumed.
+            raise exception.TrustConsumeMaximumAttempt(trust_id=trust_id)
+
+    def get_trust(self, trust_id, deleted=False):
+        session = sql.get_session()
+        query = session.query(TrustModel).filter_by(id=trust_id)
+        if not deleted:
+            query = query.filter_by(deleted_at=None)
+        ref = query.first()
+        if ref is None:
+            return None
+        if ref.expires_at is not None and not deleted:
+            now = timeutils.utcnow()
+            if now > ref.expires_at:
+                return None
+        # Do not return trusts that can't be used anymore
+        if ref.remaining_uses is not None and not deleted:
+            if ref.remaining_uses <= 0:
+                return None
+        trust_dict = ref.to_dict()
+
+        self._add_roles(trust_id, session, trust_dict)
+        return trust_dict
+
+    @sql.handle_conflicts(conflict_type='trust')
+    def list_trusts(self):
+        session = sql.get_session()
+        trusts = session.query(TrustModel).filter_by(deleted_at=None)
+        return [trust_ref.to_dict() for trust_ref in trusts]
+
+    @sql.handle_conflicts(conflict_type='trust')
+    def list_trusts_for_trustee(self, trustee_user_id):
+        session = sql.get_session()
+        trusts = (session.query(TrustModel).
+                  filter_by(deleted_at=None).
+                  filter_by(trustee_user_id=trustee_user_id))
+        return [trust_ref.to_dict() for trust_ref in trusts]
+
+    @sql.handle_conflicts(conflict_type='trust')
+    def list_trusts_for_trustor(self, trustor_user_id):
+        session = sql.get_session()
+        trusts = (session.query(TrustModel).
+                  filter_by(deleted_at=None).
+                  filter_by(trustor_user_id=trustor_user_id))
+        return [trust_ref.to_dict() for trust_ref in trusts]
+
+    @sql.handle_conflicts(conflict_type='trust')
+    def delete_trust(self, trust_id):
+        with sql.transaction() as session:
+            trust_ref = session.query(TrustModel).get(trust_id)
+            if not trust_ref:
+                raise exception.TrustNotFound(trust_id=trust_id)
+            trust_ref.deleted_at = timeutils.utcnow()
diff --git a/keystone-moon/keystone/trust/controllers.py b/keystone-moon/keystone/trust/controllers.py
new file mode 100644 (file)
index 0000000..60e34cc
--- /dev/null
@@ -0,0 +1,287 @@
+# Copyright 2013 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone import assignment
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import validation
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import token_model
+from keystone import notifications
+from keystone.openstack.common import versionutils
+from keystone.trust import schema
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+
+def _trustor_trustee_only(trust, user_id):
+    if (user_id != trust.get('trustee_user_id') and
+            user_id != trust.get('trustor_user_id')):
+        raise exception.Forbidden()
+
+
+def _admin_trustor_only(context, trust, user_id):
+    if user_id != trust.get('trustor_user_id') and not context['is_admin']:
+        raise exception.Forbidden()
+
+
+@dependency.requires('assignment_api', 'identity_api', 'role_api',
+                     'token_provider_api', 'trust_api')
+class TrustV3(controller.V3Controller):
+    collection_name = "trusts"
+    member_name = "trust"
+
+    @classmethod
+    def base_url(cls, context, path=None):
+        """Construct a path and pass it to V3Controller.base_url method."""
+
+        # NOTE(stevemar): Overriding path to /OS-TRUST/trusts so that
+        # V3Controller.base_url handles setting the self link correctly.
+        path = '/OS-TRUST/' + cls.collection_name
+        return super(TrustV3, cls).base_url(context, path=path)
+
+    def _get_user_id(self, context):
+        if 'token_id' in context:
+            token_id = context['token_id']
+            token_data = self.token_provider_api.validate_token(token_id)
+            token_ref = token_model.KeystoneToken(token_id=token_id,
+                                                  token_data=token_data)
+            return token_ref.user_id
+        return None
+
+    def get_trust(self, context, trust_id):
+        user_id = self._get_user_id(context)
+        trust = self.trust_api.get_trust(trust_id)
+        if not trust:
+            raise exception.TrustNotFound(trust_id=trust_id)
+        _trustor_trustee_only(trust, user_id)
+        self._fill_in_roles(context, trust,
+                            self.role_api.list_roles())
+        return TrustV3.wrap_member(context, trust)
+
+    def _fill_in_roles(self, context, trust, all_roles):
+        if trust.get('expires_at') is not None:
+            trust['expires_at'] = timeutils.isotime(trust['expires_at'],
+                                                    subsecond=True)
+
+        if 'roles' not in trust:
+            trust['roles'] = []
+        trust_full_roles = []
+        for trust_role in trust['roles']:
+            if isinstance(trust_role, six.string_types):
+                trust_role = {'id': trust_role}
+            matching_roles = [x for x in all_roles
+                              if x['id'] == trust_role['id']]
+            if matching_roles:
+                full_role = assignment.controllers.RoleV3.wrap_member(
+                    context, matching_roles[0])['role']
+                trust_full_roles.append(full_role)
+        trust['roles'] = trust_full_roles
+        trust['roles_links'] = {
+            'self': (self.base_url(context) + "/%s/roles" % trust['id']),
+            'next': None,
+            'previous': None}
+
+    def _normalize_role_list(self, trust, all_roles):
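+        # roles may be requested by ID or by name, e.g. (illustrative):
+        #   [{'id': '9fe2ff9ee4384b1894a90878d3e92bab'}, {'name': 'admin'}]
+        # is normalized to a list of {'id': ...} dicts, with names resolved
+        # against all_roles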
+        trust_roles = []
+        all_role_names = {r['name']: r for r in all_roles}
+        for role in trust.get('roles', []):
+            if 'id' in role:
+                trust_roles.append({'id': role['id']})
+            elif 'name' in role:
+                rolename = role['name']
+                if rolename in all_role_names:
+                    trust_roles.append({'id':
+                                        all_role_names[rolename]['id']})
+                else:
+                    raise exception.RoleNotFound("role %s is not defined" %
+                                                 rolename)
+            else:
+                raise exception.ValidationError(attribute='id or name',
+                                                target='roles')
+        return trust_roles
+
+    @controller.protected()
+    @validation.validated(schema.trust_create, 'trust')
+    def create_trust(self, context, trust=None):
+        """Create a new trust.
+
+        The user creating the trust must be the trustor.
+
+        """
+        if not trust:
+            raise exception.ValidationError(attribute='trust',
+                                            target='request')
+
+        auth_context = context.get('environment',
+                                   {}).get('KEYSTONE_AUTH_CONTEXT', {})
+
+        # Check if delegated via trust
+        if auth_context.get('is_delegated_auth'):
+            # Redelegation case
+            src_trust_id = auth_context['trust_id']
+            if not src_trust_id:
+                raise exception.Forbidden(
+                    _('Redelegation allowed for delegated by trust only'))
+
+            redelegated_trust = self.trust_api.get_trust(src_trust_id)
+        else:
+            redelegated_trust = None
+
+        if trust.get('project_id'):
+            self._require_role(trust)
+        self._require_user_is_trustor(context, trust)
+        self._require_trustee_exists(trust['trustee_user_id'])
+        all_roles = self.role_api.list_roles()
+        # Normalize roles
+        normalized_roles = self._normalize_role_list(trust, all_roles)
+        trust['roles'] = normalized_roles
+        self._require_trustor_has_role_in_project(trust)
+        trust['expires_at'] = self._parse_expiration_date(
+            trust.get('expires_at'))
+        trust_id = uuid.uuid4().hex
+        initiator = notifications._get_request_audit_info(context)
+        new_trust = self.trust_api.create_trust(trust_id, trust,
+                                                normalized_roles,
+                                                redelegated_trust,
+                                                initiator)
+        self._fill_in_roles(context, new_trust, all_roles)
+        return TrustV3.wrap_member(context, new_trust)
+
+    def _require_trustee_exists(self, trustee_user_id):
+        self.identity_api.get_user(trustee_user_id)
+
+    def _require_user_is_trustor(self, context, trust):
+        user_id = self._get_user_id(context)
+        if user_id != trust.get('trustor_user_id'):
+            raise exception.Forbidden(
+                _("The authenticated user should match the trustor."))
+
+    def _require_role(self, trust):
+        if not trust.get('roles'):
+            raise exception.Forbidden(
+                _('At least one role should be specified.'))
+
+    def _get_user_role(self, trust):
+        if not self._attribute_is_empty(trust, 'project_id'):
+            return self.assignment_api.get_roles_for_user_and_project(
+                trust['trustor_user_id'], trust['project_id'])
+        else:
+            return []
+
+    def _require_trustor_has_role_in_project(self, trust):
+        user_roles = self._get_user_role(trust)
+        for trust_role in trust['roles']:
+            matching_roles = [x for x in user_roles
+                              if x == trust_role['id']]
+            if not matching_roles:
+                raise exception.RoleNotFound(role_id=trust_role['id'])
+
+    def _parse_expiration_date(self, expiration_date):
+        if expiration_date is None:
+            return None
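+        # expiration timestamps without timezone info are assumed to be UTC,
+        # e.g. (illustrative) '2015-03-30T12:00:00' is read as
+        # '2015-03-30T12:00:00Z'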
+        if not expiration_date.endswith('Z'):
+            expiration_date += 'Z'
+        try:
+            return timeutils.parse_isotime(expiration_date)
+        except ValueError:
+            raise exception.ValidationTimeStampError()
+
+    def _check_role_for_trust(self, context, trust_id, role_id):
+        """Checks if a role has been assigned to a trust."""
+        trust = self.trust_api.get_trust(trust_id)
+        if not trust:
+            raise exception.TrustNotFound(trust_id=trust_id)
+        user_id = self._get_user_id(context)
+        _trustor_trustee_only(trust, user_id)
+        if not any(role['id'] == role_id for role in trust['roles']):
+            raise exception.RoleNotFound(role_id=role_id)
+
+    @controller.protected()
+    def list_trusts(self, context):
+        query = context['query_string']
+        trusts = []
+        if not query:
+            self.assert_admin(context)
+            trusts += self.trust_api.list_trusts()
+        if 'trustor_user_id' in query:
+            user_id = query['trustor_user_id']
+            calling_user_id = self._get_user_id(context)
+            if user_id != calling_user_id:
+                raise exception.Forbidden()
+            trusts += (self.trust_api.
+                       list_trusts_for_trustor(user_id))
+        if 'trustee_user_id' in query:
+            user_id = query['trustee_user_id']
+            calling_user_id = self._get_user_id(context)
+            if user_id != calling_user_id:
+                raise exception.Forbidden()
+            trusts += self.trust_api.list_trusts_for_trustee(user_id)
+        for trust in trusts:
+            # get_trust returns roles; list_trusts does not. In some
+            # circumstances 'roles' is absent from the query response,
+            # so check before deleting it.
+            if 'roles' in trust:
+                del trust['roles']
+            if trust.get('expires_at') is not None:
+                trust['expires_at'] = timeutils.isotime(trust['expires_at'],
+                                                        subsecond=True)
+        return TrustV3.wrap_collection(context, trusts)
+
+    @controller.protected()
+    def delete_trust(self, context, trust_id):
+        trust = self.trust_api.get_trust(trust_id)
+        if not trust:
+            raise exception.TrustNotFound(trust_id=trust_id)
+
+        user_id = self._get_user_id(context)
+        _admin_trustor_only(context, trust, user_id)
+        initiator = notifications._get_request_audit_info(context)
+        self.trust_api.delete_trust(trust_id, initiator)
+
+    @controller.protected()
+    def list_roles_for_trust(self, context, trust_id):
+        trust = self.get_trust(context, trust_id)['trust']
+        if not trust:
+            raise exception.TrustNotFound(trust_id=trust_id)
+        user_id = self._get_user_id(context)
+        _trustor_trustee_only(trust, user_id)
+        return {'roles': trust['roles'],
+                'links': trust['roles_links']}
+
+    @versionutils.deprecated(
+        versionutils.deprecated.KILO,
+        remove_in=+2)
+    def check_role_for_trust(self, context, trust_id, role_id):
+        return self._check_role_for_trust(context, trust_id, role_id)
+
+    @controller.protected()
+    def get_role_for_trust(self, context, trust_id, role_id):
+        """Get a role that has been assigned to a trust."""
+        self._check_role_for_trust(context, trust_id, role_id)
+        role = self.role_api.get_role(role_id)
+        return assignment.controllers.RoleV3.wrap_member(context, role)
diff --git a/keystone-moon/keystone/trust/core.py b/keystone-moon/keystone/trust/core.py
new file mode 100644 (file)
index 0000000..de6b6d8
--- /dev/null
@@ -0,0 +1,251 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Identity service."""
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _
+from keystone import notifications
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+
+@dependency.requires('identity_api')
+@dependency.provider('trust_api')
+class Manager(manager.Manager):
+    """Default pivot point for the Trust backend.
+
+    See :mod:`keystone.common.manager.Manager` for more details on how this
+    dynamically calls the backend.
+
+    """
+    _TRUST = "OS-TRUST:trust"
+
+    def __init__(self):
+        super(Manager, self).__init__(CONF.trust.driver)
+
+    @staticmethod
+    def _validate_redelegation(redelegated_trust, trust):
+        # Validate against:
+        # 0 < redelegation_count <= max_redelegation_count
+        max_redelegation_count = CONF.trust.max_redelegation_count
+        redelegation_depth = redelegated_trust.get('redelegation_count', 0)
+        if not (0 < redelegation_depth <= max_redelegation_count):
+            raise exception.Forbidden(
+                _('Remaining redelegation depth of %(redelegation_depth)d'
+                  ' out of allowed range of [0..%(max_count)d]') %
+                {'redelegation_depth': redelegation_depth,
+                 'max_count': max_redelegation_count})
+
+        # remaining_uses is None
+        remaining_uses = trust.get('remaining_uses')
+        if remaining_uses is not None:
+            raise exception.Forbidden(
+                _('Field "remaining_uses" is set to %(value)s'
+                  ' while it must not be set in order to redelegate a trust')
+                % {'value': remaining_uses})
+
+        # expiry times
+        trust_expiry = trust.get('expires_at')
+        redelegated_expiry = redelegated_trust['expires_at']
+        if trust_expiry:
+            # redelegated trust is from backend and has no tzinfo
+            if redelegated_expiry < trust_expiry.replace(tzinfo=None):
+                raise exception.Forbidden(
+                    _('Requested expiration time is more '
+                      'than redelegated trust can provide'))
+        else:
+            trust['expires_at'] = redelegated_expiry
+
+        # trust roles is a subset of roles of the redelegated trust
+        parent_roles = set(role['id']
+                           for role in redelegated_trust['roles'])
+        if not all(role['id'] in parent_roles for role in trust['roles']):
+            raise exception.Forbidden(
+                _('Some of requested roles are not in redelegated trust'))
+
+    def get_trust_pedigree(self, trust_id):
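+        """Return the redelegation chain of a trust.
+
+        The first element is the trust itself, followed by each trust it
+        was (transitively) redelegated from.
+        """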
+        trust = self.driver.get_trust(trust_id)
+        trust_chain = [trust]
+        if trust and trust.get('redelegated_trust_id'):
+            trusts = self.driver.list_trusts_for_trustor(
+                trust['trustor_user_id'])
+            while trust_chain[-1].get('redelegated_trust_id'):
+                for t in trusts:
+                    if t['id'] == trust_chain[-1]['redelegated_trust_id']:
+                        trust_chain.append(t)
+                        break
+
+        return trust_chain
+
+    def get_trust(self, trust_id, deleted=False):
+        trust = self.driver.get_trust(trust_id, deleted)
+
+        if trust and trust.get('redelegated_trust_id') and not deleted:
+            trust_chain = self.get_trust_pedigree(trust_id)
+
+            for parent, child in zip(trust_chain[1:], trust_chain):
+                self._validate_redelegation(parent, child)
+                try:
+                    self.identity_api.assert_user_enabled(
+                        parent['trustee_user_id'])
+                except (AssertionError, exception.NotFound):
+                    raise exception.Forbidden(
+                        _('One of the trust agents is disabled or deleted'))
+
+        return trust
+
+    def create_trust(self, trust_id, trust, roles, redelegated_trust=None,
+                     initiator=None):
+        """Create a new trust.
+
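+        An illustrative call (identifiers below are made-up variables)::
+
+            trust_api.create_trust(
+                trust_id=new_trust_id,
+                trust={'trustor_user_id': trustor_id,
+                       'trustee_user_id': trustee_id,
+                       'impersonation': False,
+                       'project_id': project_id,
+                       'allow_redelegation': True},
+                roles=[{'id': role_id}])
+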
+        :returns: a new trust
+        """
+        # Default for initial trust in chain is max_redelegation_count
+        max_redelegation_count = CONF.trust.max_redelegation_count
+        requested_count = trust.get('redelegation_count')
+        redelegatable = (trust.pop('allow_redelegation', False)
+                         and requested_count != 0)
+        if not redelegatable:
+            trust['redelegation_count'] = requested_count = 0
+            remaining_uses = trust.get('remaining_uses')
+            if remaining_uses is not None and remaining_uses <= 0:
+                msg = _('remaining_uses must be a positive integer or null.')
+                raise exception.ValidationError(msg)
+        else:
+            # Validate requested redelegation depth
+            if requested_count and requested_count > max_redelegation_count:
+                raise exception.Forbidden(
+                    _('Requested redelegation depth of %(requested_count)d '
+                      'is greater than allowed %(max_count)d') %
+                    {'requested_count': requested_count,
+                     'max_count': max_redelegation_count})
+            # Decline remaining_uses
+            if 'remaining_uses' in trust:
+                raise exception.ValidationError(
+                    _('remaining_uses must not be set '
+                      'if redelegation is allowed'))
+
+        if redelegated_trust:
+            trust['redelegated_trust_id'] = redelegated_trust['id']
+            remaining_count = redelegated_trust['redelegation_count'] - 1
+
+            # Validate depth consistency
+            if (redelegatable and requested_count and
+                    requested_count != remaining_count):
+                msg = _('Modifying "redelegation_count" upon redelegation is '
+                        'forbidden. Omitting this parameter is advised.')
+                raise exception.Forbidden(msg)
+            trust.setdefault('redelegation_count', remaining_count)
+
+            # Check entire trust pedigree validity
+            pedigree = self.get_trust_pedigree(redelegated_trust['id'])
+            for t in pedigree:
+                self._validate_redelegation(t, trust)
+
+        trust.setdefault('redelegation_count', max_redelegation_count)
+        ref = self.driver.create_trust(trust_id, trust, roles)
+
+        notifications.Audit.created(self._TRUST, trust_id, initiator=initiator)
+
+        return ref
+
+    def delete_trust(self, trust_id, initiator=None):
+        """Remove a trust.
+
+        :raises: keystone.exception.TrustNotFound
+
+        Recursively remove given and redelegated trusts
+        """
+        trust = self.driver.get_trust(trust_id)
+        if not trust:
+            raise exception.TrustNotFound(trust_id=trust_id)
+
+        trusts = self.driver.list_trusts_for_trustor(
+            trust['trustor_user_id'])
+
+        for t in trusts:
+            if t.get('redelegated_trust_id') == trust_id:
+                # recursive call to make sure all notifications are sent
+                try:
+                    self.delete_trust(t['id'])
+                except exception.TrustNotFound:
+                    # if trust was deleted by concurrent process
+                    # consistency must not suffer
+                    pass
+
+        # end recursion
+        self.driver.delete_trust(trust_id)
+
+        notifications.Audit.deleted(self._TRUST, trust_id, initiator)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Driver(object):
+
+    @abc.abstractmethod
+    def create_trust(self, trust_id, trust, roles):
+        """Create a new trust.
+
+        :returns: a new trust
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def get_trust(self, trust_id, deleted=False):
+        """Get a trust by the trust id.
+
+        :param trust_id: the trust identifier
+        :type trust_id: string
+        :param deleted: return the trust even if it is deleted, expired, or
+                        has no consumptions left
+        :type deleted: bool
+        """
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_trusts(self):
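+        """List all trusts.
+
+        :returns: a list of all trusts
+        """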
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_trusts_for_trustee(self, trustee):
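+        """List all trusts for which ``trustee`` is the trustee.
+
+        :param trustee: the trustee user id
+        """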
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def list_trusts_for_trustor(self, trustor):
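+        """List all trusts delegated by ``trustor``.
+
+        :param trustor: the trustor user id
+        """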
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def delete_trust(self, trust_id):
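+        """Delete a trust by the trust id.
+
+        :param trust_id: the trust identifier
+        :type trust_id: string
+        """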
+        raise exception.NotImplemented()  # pragma: no cover
+
+    @abc.abstractmethod
+    def consume_use(self, trust_id):
+        """Consume one use when a trust was created with a limitation on its
+        uses, provided there are still uses available.
+
+        :raises: keystone.exception.TrustUseLimitReached,
+                 keystone.exception.TrustNotFound
+        """
+        raise exception.NotImplemented()  # pragma: no cover
diff --git a/keystone-moon/keystone/trust/routers.py b/keystone-moon/keystone/trust/routers.py
new file mode 100644 (file)
index 0000000..3a6243c
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""WSGI Routers for the Trust service."""
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.trust import controllers
+
+
+_build_resource_relation = functools.partial(
+    json_home.build_v3_extension_resource_relation, extension_name='OS-TRUST',
+    extension_version='1.0')
+
+TRUST_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
+    'OS-TRUST', '1.0', 'trust_id')
+
+
+class Routers(wsgi.RoutersBase):
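+    """Routers for the OS-TRUST extension.
+
+    The resources registered below map onto the v3 OS-TRUST API
+    (paths are relative to the version root, e.g. ``/v3``)::
+
+        GET/POST   /OS-TRUST/trusts
+        GET/DELETE /OS-TRUST/trusts/{trust_id}
+        GET        /OS-TRUST/trusts/{trust_id}/roles
+        GET/HEAD   /OS-TRUST/trusts/{trust_id}/roles/{role_id}
+    """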
+
+    def append_v3_routers(self, mapper, routers):
+        trust_controller = controllers.TrustV3()
+
+        self._add_resource(
+            mapper, trust_controller,
+            path='/OS-TRUST/trusts',
+            get_action='list_trusts',
+            post_action='create_trust',
+            rel=_build_resource_relation(resource_name='trusts'))
+        self._add_resource(
+            mapper, trust_controller,
+            path='/OS-TRUST/trusts/{trust_id}',
+            get_action='get_trust',
+            delete_action='delete_trust',
+            rel=_build_resource_relation(resource_name='trust'),
+            path_vars={
+                'trust_id': TRUST_ID_PARAMETER_RELATION,
+            })
+        self._add_resource(
+            mapper, trust_controller,
+            path='/OS-TRUST/trusts/{trust_id}/roles',
+            get_action='list_roles_for_trust',
+            rel=_build_resource_relation(resource_name='trust_roles'),
+            path_vars={
+                'trust_id': TRUST_ID_PARAMETER_RELATION,
+            })
+        self._add_resource(
+            mapper, trust_controller,
+            path='/OS-TRUST/trusts/{trust_id}/roles/{role_id}',
+            get_head_action='get_role_for_trust',
+            rel=_build_resource_relation(resource_name='trust_role'),
+            path_vars={
+                'trust_id': TRUST_ID_PARAMETER_RELATION,
+                'role_id': json_home.Parameters.ROLE_ID,
+            })
diff --git a/keystone-moon/keystone/trust/schema.py b/keystone-moon/keystone/trust/schema.py
new file mode 100644 (file)
index 0000000..087cd1e
--- /dev/null
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+
+
+_trust_properties = {
+    'trustor_user_id': parameter_types.id_string,
+    'trustee_user_id': parameter_types.id_string,
+    'impersonation': parameter_types.boolean,
+    'project_id': validation.nullable(parameter_types.id_string),
+    'remaining_uses': {
+        'type': ['integer', 'null'],
+        'minimum': 1
+    },
+    'expires_at': {
+        'type': ['null', 'string']
+    },
+    'allow_redelegation': {
+        'type': ['boolean', 'null']
+    },
+    'redelegation_count': {
+        'type': ['integer', 'null'],
+        'minimum': 0
+    },
+    # TODO(lbragstad): Need to find a better way to do this. We should be
+    # checking that a role is a list of IDs and/or names.
+    'roles': validation.add_array_type(parameter_types.id_string)
+}
+
+trust_create = {
+    'type': 'object',
+    'properties': _trust_properties,
+    'required': ['trustor_user_id', 'trustee_user_id', 'impersonation'],
+    'additionalProperties': True
+}
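+
+# NOTE: an illustrative request body that validates against ``trust_create``
+# (all identifiers are made up):
+#
+#     {"trustor_user_id": "130b093385a14e2bbbf7ba6e6c1e2b7c",
+#      "trustee_user_id": "a185f28a7b3a4b26b3b2f1e54b743071",
+#      "impersonation": false,
+#      "project_id": "f09f48bff1c84a50b9a62d3ca7691e3a",
+#      "remaining_uses": 5,
+#      "roles": ["c703057be8ce4f60a26e72a697bbb8f9"]}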
diff --git a/keystone-moon/openstack-common.conf b/keystone-moon/openstack-common.conf
new file mode 100644 (file)
index 0000000..0bd4223
--- /dev/null
@@ -0,0 +1,12 @@
+[DEFAULT]
+
+module=fileutils
+module=service
+module=systemd
+module=versionutils
+
+script=tools/colorizer.py
+script=tools/install_venv_common.py
+
+# The base module to hold the copy of openstack.common
+base=keystone
diff --git a/keystone-moon/rally-scenarios/README.rst b/keystone-moon/rally-scenarios/README.rst
new file mode 100644 (file)
index 0000000..2c4a484
--- /dev/null
@@ -0,0 +1,5 @@
+This directory contains Rally benchmark scenarios to be run by OpenStack CI.
+
+
+* more about Rally: https://wiki.openstack.org/wiki/Rally
+* how to add Rally gates: https://wiki.openstack.org/wiki/Rally/RallyGates
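+* an example scenario is provided alongside this file: ``keystone.yaml``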
diff --git a/keystone-moon/rally-scenarios/keystone.yaml b/keystone-moon/rally-scenarios/keystone.yaml
new file mode 100644 (file)
index 0000000..d0f2ebf
--- /dev/null
@@ -0,0 +1,9 @@
+---
+  KeystoneBasic.create_delete_user:
+    -
+      args:
+        name_length: 10
+      runner:
+        type: "constant"
+        times: 2500
+        concurrency: 60
diff --git a/keystone-moon/requirements-py3.txt b/keystone-moon/requirements-py3.txt
new file mode 100644 (file)
index 0000000..6f7c474
--- /dev/null
@@ -0,0 +1,38 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+pbr>=0.6,!=0.7,<1.0
+WebOb>=1.2.3
+# Eventlet is considered dead for Py3
+# eventlet>=0.16.1
+greenlet>=0.3.2
+netaddr>=0.7.12
+PasteDeploy>=1.5.0
+Paste
+Routes>=1.12.3,!=2.0
+six>=1.9.0
+SQLAlchemy>=0.9.7,<=0.9.99
+sqlalchemy-migrate>=0.9.5
+passlib
+iso8601>=0.1.9
+python-keystoneclient>=1.1.0
+keystonemiddleware>=1.0.0
+oslo.concurrency>=1.4.1         # Apache-2.0
+oslo.config>=1.9.0  # Apache-2.0
+# oslo.messaging tries to pull in eventlet
+# oslo.messaging>=1.6.0  # Apache-2.0
+oslo.db>=1.5.0  # Apache-2.0
+oslo.i18n>=1.3.0  # Apache-2.0
+oslo.log>=0.4.0  # Apache-2.0
+oslo.middleware>=0.3.0                  # Apache-2.0
+oslo.policy>=0.3.0  # Apache-2.0
+oslo.serialization>=1.2.0               # Apache-2.0
+oslo.utils>=1.2.0                       # Apache-2.0
+oauthlib>=0.6
+pysaml2
+dogpile.cache>=0.5.3
+jsonschema>=2.0.0,<3.0.0
+# pycadf tries to pull in eventlet
+# pycadf>=0.6.0
+posix_ipc
diff --git a/keystone-moon/requirements.txt b/keystone-moon/requirements.txt
new file mode 100644 (file)
index 0000000..1423644
--- /dev/null
@@ -0,0 +1,37 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+pbr>=0.6,!=0.7,<1.0
+WebOb>=1.2.3
+eventlet>=0.16.1
+greenlet>=0.3.2
+netaddr>=0.7.12
+PasteDeploy>=1.5.0
+Paste
+Routes>=1.12.3,!=2.0
+cryptography>=0.4 # Apache-2.0
+six>=1.9.0
+SQLAlchemy>=0.9.7,<=0.9.99
+sqlalchemy-migrate>=0.9.5
+passlib
+iso8601>=0.1.9
+python-keystoneclient>=1.1.0
+keystonemiddleware>=1.0.0
+oslo.concurrency>=1.4.1         # Apache-2.0
+oslo.config>=1.9.0  # Apache-2.0
+oslo.messaging>=1.6.0  # Apache-2.0
+oslo.db>=1.5.0  # Apache-2.0
+oslo.i18n>=1.3.0  # Apache-2.0
+oslo.log>=0.4.0  # Apache-2.0
+oslo.middleware>=0.3.0                  # Apache-2.0
+oslo.policy>=0.3.0  # Apache-2.0
+oslo.serialization>=1.2.0               # Apache-2.0
+oslo.utils>=1.2.0                       # Apache-2.0
+oauthlib>=0.6
+pysaml2
+dogpile.cache>=0.5.3
+jsonschema>=2.0.0,<3.0.0
+pycadf>=0.8.0
+posix_ipc
+msgpack-python>=0.4.0
diff --git a/keystone-moon/run_tests.sh b/keystone-moon/run_tests.sh
new file mode 100755 (executable)
index 0000000..46fcf4d
--- /dev/null
@@ -0,0 +1,161 @@
+#!/bin/bash
+
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -eu
+
+function usage {
+    echo "Usage: $0 [OPTION]..."
+    echo "Run Keystone's test suite(s)"
+    echo ""
+    echo "  -V, --virtual-env        Always use virtualenv.  Install automatically if not present"
+    echo "  -N, --no-virtual-env     Don't use virtualenv.  Run tests in local environment"
+    echo "  -x, --stop               Stop running tests after the first error or failure."
+    echo "  -f, --force              Force a clean re-build of the virtual environment. Useful when dependencies have been added."
+    echo "  -u, --update             Update the virtual environment with any newer package versions"
+    echo "  -p, --pep8               Just run flake8"
+    echo "  -8, --8                  Just run flake8, don't show PEP8 text for each error"
+    echo "  -P, --no-pep8            Don't run flake8"
+    echo "  -c, --coverage           Generate coverage report"
+    echo "  -h, --help               Print this usage message"
+    echo ""
+    echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
+    echo "      If no virtualenv is found, the script will ask if you would like to create one.  If you "
+    echo "      prefer to run tests NOT in a virtual environment, simply pass the -N option."
+    exit
+}
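+
+# Typical invocations (illustrative):
+#   ./run_tests.sh -V        # run the suite inside a virtualenv
+#   ./run_tests.sh -N -p     # run only flake8 against the local environment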
+
+function process_option {
+    case "$1" in
+        -h|--help) usage;;
+        -V|--virtual-env) always_venv=1; never_venv=0;;
+        -N|--no-virtual-env) always_venv=0; never_venv=1;;
+        -x|--stop) failfast=1;;
+        -f|--force) force=1;;
+        -u|--update) update=1;;
+        -p|--pep8) just_flake8=1;;
+        -8|--8) short_flake8=1;;
+        -P|--no-pep8) no_flake8=1;;
+        -c|--coverage) coverage=1;;
+        -*) testropts="$testropts $1";;
+        *) testrargs="$testrargs $1"
+    esac
+}
+
+venv=.venv
+with_venv=tools/with_venv.sh
+always_venv=0
+never_venv=0
+force=0
+failfast=0
+testrargs=
+testropts=--subunit
+wrapper=""
+just_flake8=0
+short_flake8=0
+no_flake8=0
+coverage=0
+update=0
+
+for arg in "$@"; do
+    process_option $arg
+done
+
+TESTRTESTS="python setup.py testr"
+
+# If enabled, tell testr to collect coverage data
+if [ $coverage -eq 1 ]; then
+    TESTRTESTS="$TESTRTESTS --coverage"
+fi
+
+function run_tests {
+    set -e
+    echo ${wrapper}
+    if [ $failfast -eq 1 ]; then
+        testrargs="$testrargs -- --failfast"
+    fi
+    ${wrapper} $TESTRTESTS --testr-args="$testropts $testrargs" | \
+        ${wrapper} subunit-2to1 | \
+        ${wrapper} tools/colorizer.py
+}
+
+function run_flake8 {
+    FLAGS=--show-pep8
+    if [ $# -gt 0 ] && [ 'short' == ''$1 ]; then
+        FLAGS=''
+    fi
+
+    echo "Running flake8 ..."
+    # Just run flake8 in current environment
+    echo ${wrapper} flake8 $FLAGS | tee pep8.txt
+    ${wrapper} flake8 $FLAGS | tee pep8.txt
+}
+
+if [ $never_venv -eq 0 ]; then
+    # Remove the virtual environment if --force used
+    if [ $force -eq 1 ]; then
+        echo "Cleaning virtualenv..."
+        rm -rf ${venv}
+    fi
+    if [ $update -eq 1 ]; then
+        echo "Updating virtualenv..."
+        python tools/install_venv.py
+    fi
+    if [ -e ${venv} ]; then
+        wrapper="${with_venv}"
+    else
+        if [ $always_venv -eq 1 ]; then
+            # Automatically install the virtualenv
+            python tools/install_venv.py
+            wrapper="${with_venv}"
+        else
+            echo -e "No virtual environment found...create one? (Y/n) \c"
+            read use_ve
+            if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
+                # Install the virtualenv and run the test suite in it
+                python tools/install_venv.py
+                wrapper=${with_venv}
+            fi
+        fi
+    fi
+fi
+
+# Delete old coverage data from previous runs
+if [ $coverage -eq 1 ]; then
+    ${wrapper} coverage erase
+fi
+
+if [ $just_flake8 -eq 1 ]; then
+    run_flake8
+    exit
+fi
+
+if [ $short_flake8 -eq 1 ]; then
+    run_flake8 short
+    exit
+fi
+
+
+run_tests
+
+# NOTE(sirp): we only want to run flake8 when we're running the full-test
+# suite, not when we're running tests individually. To handle this, we need to
+# distinguish between options (testropts), which begin with a '-', and arguments
+# (testrargs).
+if [ -z "$testrargs" ]; then
+    if [ $no_flake8 -eq 0 ]; then
+        run_flake8
+    fi
+fi
diff --git a/keystone-moon/setup.cfg b/keystone-moon/setup.cfg
new file mode 100644 (file)
index 0000000..e646480
--- /dev/null
@@ -0,0 +1,69 @@
+[metadata]
+name = keystone
+version = 2015.1
+summary = OpenStack Identity
+description-file =
+    README.rst
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+    Environment :: OpenStack
+    Intended Audience :: Information Technology
+    Intended Audience :: System Administrators
+    License :: OSI Approved :: Apache Software License
+    Operating System :: POSIX :: Linux
+    Programming Language :: Python
+    Programming Language :: Python :: 2
+    Programming Language :: Python :: 2.7
+
+[files]
+packages =
+    keystone
+scripts =
+    bin/keystone-all
+    bin/keystone-manage
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
+
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
+[build_sphinx]
+all_files = 1
+build-dir = doc/build
+source-dir = doc/source
+
+[compile_catalog]
+directory = keystone/locale
+domain = keystone
+
+[update_catalog]
+domain = keystone
+output_dir = keystone/locale
+input_file = keystone/locale/keystone.pot
+
+[extract_messages]
+keywords = _ gettext ngettext l_ lazy_gettext
+mapping_file = babel.cfg
+output_file = keystone/locale/keystone.pot
+copyright_holder = OpenStack Foundation
+msgid_bugs_address = https://bugs.launchpad.net/keystone
+
+# NOTE(dstanek): Uncomment the [pbr] section below and remove the ext.apidoc
+# Sphinx extension when https://launchpad.net/bugs/1260495 is fixed.
+[pbr]
+warnerrors = True
+#autodoc_tree_index_modules = True
+#autodoc_tree_root = ./keystone
+
+[entry_points]
+oslo.config.opts =
+    keystone = keystone.common.config:list_opts
+    keystone.notifications = keystone.notifications:list_opts
+    keystone.openstack.common.eventlet_backdoor = keystone.openstack.common.eventlet_backdoor:list_opts
diff --git a/keystone-moon/setup.py b/keystone-moon/setup.py
new file mode 100644 (file)
index 0000000..7363757
--- /dev/null
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
+
+setuptools.setup(
+    setup_requires=['pbr'],
+    pbr=True)
diff --git a/keystone-moon/test-requirements-py3.txt b/keystone-moon/test-requirements-py3.txt
new file mode 100644 (file)
index 0000000..371e05f
--- /dev/null
@@ -0,0 +1,52 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+hacking>=0.10.0,<0.11
+bashate>=0.2 # Apache-2.0
+
+# pysqlite does not install on py3
+# Optional backend: SQL
+# pysqlite
+
+# python-memcached does not install on py3
+# Optional backend: Memcache
+# python-memcached>=1.48
+
+# Optional dogpile backend: MongoDB
+pymongo>=2.5
+
+# Optional backend: LDAP
+# python-ldap does not install on py3
+# authenticate against an existing LDAP server
+# python-ldap>=2.4
+# ldappool>=1.0 # MPL
+-e git+https://github.com/rbarrois/python-ldap.git@py3#egg=python-ldap
+
+# Testing
+# computes code coverage percentages
+coverage>=3.6
+# fixture stubbing
+fixtures>=0.3.14
+# xml parsing
+lxml>=2.3
+# mock object framework
+mock>=1.0
+oslotest>=1.2.0  # Apache-2.0
+# required to build documentation
+sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
+# test wsgi apps without starting an http server
+WebTest>=2.0
+
+# mox was removed in favor of mock. We should not re-enable this module. See
+# discussion: http://lists.openstack.org/pipermail/openstack-dev/2013-July/012484.html
+#mox>=0.5.3
+
+discover
+python-subunit>=0.0.18
+testrepository>=0.0.18
+testtools>=0.9.36,!=1.2.0
+
+# For documentation
+oslosphinx>=2.2.0  # Apache-2.0
+
diff --git a/keystone-moon/test-requirements.txt b/keystone-moon/test-requirements.txt
new file mode 100644 (file)
index 0000000..170cb41
--- /dev/null
@@ -0,0 +1,50 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+hacking>=0.10.0,<0.11
+bashate>=0.2 # Apache-2.0
+
+# Optional backend: SQL
+pysqlite
+
+# Optional backend: Memcache
+python-memcached>=1.48
+
+# Optional dogpile backend: MongoDB
+pymongo>=2.5
+
+# Optional backend: LDAP
+# authenticate against an existing LDAP server
+python-ldap>=2.4
+ldappool>=1.0 # MPL
+
+# Testing
+# computes code coverage percentages
+coverage>=3.6
+# fixture stubbing
+fixtures>=0.3.14
+# xml parsing
+lxml>=2.3
+# mock object framework
+mock>=1.0
+oslotest>=1.2.0  # Apache-2.0
+# required to build documentation
+sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
+# test wsgi apps without starting an http server
+WebTest>=2.0
+
+# mox was removed in favor of mock. We should not re-enable this module. See
+# discussion: http://lists.openstack.org/pipermail/openstack-dev/2013-July/012484.html
+#mox>=0.5.3
+
+discover
+python-subunit>=0.0.18
+testrepository>=0.0.18
+testtools>=0.9.36,!=1.2.0
+
+# For documentation
+oslosphinx>=2.2.0  # Apache-2.0
+
+tempest-lib>=0.3.0
+
diff --git a/keystone-moon/tools/colorizer.py b/keystone-moon/tools/colorizer.py
new file mode 100755 (executable)
index 0000000..a16c620
--- /dev/null
@@ -0,0 +1,333 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013, Nebula, Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
+#
+#    Permission is hereby granted, free of charge, to any person obtaining
+#    a copy of this software and associated documentation files (the
+#    "Software"), to deal in the Software without restriction, including
+#    without limitation the rights to use, copy, modify, merge, publish,
+#    distribute, sublicense, and/or sell copies of the Software, and to
+#    permit persons to whom the Software is furnished to do so, subject to
+#    the following conditions:
+#
+#    The above copyright notice and this permission notice shall be
+#    included in all copies or substantial portions of the Software.
+#
+#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+#    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+#    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+#    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+#    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""Display a subunit stream through a colorized unittest test runner."""
+
+import heapq
+import sys
+import unittest
+
+import six
+import subunit
+import testtools
+
+
+class _AnsiColorizer(object):
+    """Colorizer allows callers to write text in a particular color.
+
+    A colorizer is an object that loosely wraps around a stream, allowing
+    callers to write text to the stream in a particular color.
+
+    Colorizer classes must implement C{supported()} and C{write(text, color)}.
+    """
+    _colors = dict(black=30, red=31, green=32, yellow=33,
+                   blue=34, magenta=35, cyan=36, white=37)
+
+    def __init__(self, stream):
+        self.stream = stream
+
+    def supported(cls, stream=sys.stdout):
+        """Check is the current platform supports coloring terminal output.
+
+        A class method that returns True if the current platform supports
+        coloring terminal output using this method. Returns False otherwise.
+        """
+        if not stream.isatty():
+            return False  # auto color only on TTYs
+        try:
+            import curses
+        except ImportError:
+            return False
+        else:
+            try:
+                try:
+                    return curses.tigetnum("colors") > 2
+                except curses.error:
+                    curses.setupterm()
+                    return curses.tigetnum("colors") > 2
+            except Exception:
+                # guess false in case of error
+                return False
+    supported = classmethod(supported)
+
+    def write(self, text, color):
+        """Write the given text to the stream in the given color.
+
+        @param text: Text to be written to the stream.
+
+        @param color: A string label for a color. e.g. 'red', 'white'.
+        """
+        color = self._colors[color]
+        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
+
+
+class _Win32Colorizer(object):
+    """See _AnsiColorizer docstring."""
+    def __init__(self, stream):
+        import win32console
+        red, green, blue, bold = (win32console.FOREGROUND_RED,
+                                  win32console.FOREGROUND_GREEN,
+                                  win32console.FOREGROUND_BLUE,
+                                  win32console.FOREGROUND_INTENSITY)
+        self.stream = stream
+        self.screenBuffer = win32console.GetStdHandle(
+            win32console.STD_OUT_HANDLE)
+        self._colors = {
+            'normal': red | green | blue,
+            'red': red | bold,
+            'green': green | bold,
+            'blue': blue | bold,
+            'yellow': red | green | bold,
+            'magenta': red | blue | bold,
+            'cyan': green | blue | bold,
+            'white': red | green | blue | bold,
+        }
+
+    def supported(cls, stream=sys.stdout):
+        try:
+            import win32console
+            screenBuffer = win32console.GetStdHandle(
+                win32console.STD_OUT_HANDLE)
+        except ImportError:
+            return False
+        import pywintypes
+        try:
+            screenBuffer.SetConsoleTextAttribute(
+                win32console.FOREGROUND_RED |
+                win32console.FOREGROUND_GREEN |
+                win32console.FOREGROUND_BLUE)
+        except pywintypes.error:
+            return False
+        else:
+            return True
+    supported = classmethod(supported)
+
+    def write(self, text, color):
+        color = self._colors[color]
+        self.screenBuffer.SetConsoleTextAttribute(color)
+        self.stream.write(text)
+        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
+
+
+class _NullColorizer(object):
+    """See _AnsiColorizer docstring."""
+    def __init__(self, stream):
+        self.stream = stream
+
+    def supported(cls, stream=sys.stdout):
+        return True
+    supported = classmethod(supported)
+
+    def write(self, text, color):
+        self.stream.write(text)
+
+
+def get_elapsed_time_color(elapsed_time):
+    if elapsed_time > 1.0:
+        return 'red'
+    elif elapsed_time > 0.25:
+        return 'yellow'
+    else:
+        return 'green'
+
+
+class OpenStackTestResult(testtools.TestResult):
+    def __init__(self, stream, descriptions, verbosity):
+        super(OpenStackTestResult, self).__init__()
+        self.stream = stream
+        self.showAll = verbosity > 1
+        self.num_slow_tests = 10
+        self.slow_tests = []  # this is a fixed-sized heap
+        self.colorizer = None
+        # NOTE(vish): reset stdout for the terminal check
+        stdout = sys.stdout
+        sys.stdout = sys.__stdout__
+        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
+            if colorizer.supported():
+                self.colorizer = colorizer(self.stream)
+                break
+        sys.stdout = stdout
+        self.start_time = None
+        self.last_time = {}
+        self.results = {}
+        self.last_written = None
+
+    def _writeElapsedTime(self, elapsed):
+        color = get_elapsed_time_color(elapsed)
+        self.colorizer.write("  %.2f" % elapsed, color)
+
+    def _addResult(self, test, *args):
+        try:
+            name = test.id()
+        except AttributeError:
+            name = 'Unknown.unknown'
+        test_class, test_name = name.rsplit('.', 1)
+
+        elapsed = (self._now() - self.start_time).total_seconds()
+        item = (elapsed, test_class, test_name)
+        if len(self.slow_tests) >= self.num_slow_tests:
+            heapq.heappushpop(self.slow_tests, item)
+        else:
+            heapq.heappush(self.slow_tests, item)
+
+        self.results.setdefault(test_class, [])
+        self.results[test_class].append((test_name, elapsed) + args)
+        self.last_time[test_class] = self._now()
+        self.writeTests()
+
+    def _writeResult(self, test_name, elapsed, long_result, color,
+                     short_result, success):
+        if self.showAll:
+            self.stream.write('    %s' % str(test_name).ljust(66))
+            self.colorizer.write(long_result, color)
+            if success:
+                self._writeElapsedTime(elapsed)
+            self.stream.writeln()
+        else:
+            self.colorizer.write(short_result, color)
+
+    def addSuccess(self, test):
+        super(OpenStackTestResult, self).addSuccess(test)
+        self._addResult(test, 'OK', 'green', '.', True)
+
+    def addFailure(self, test, err):
+        if test.id() == 'process-returncode':
+            return
+        super(OpenStackTestResult, self).addFailure(test, err)
+        self._addResult(test, 'FAIL', 'red', 'F', False)
+
+    def addError(self, test, err):
+        super(OpenStackTestResult, self).addError(test, err)
+        self._addResult(test, 'ERROR', 'red', 'E', False)
+
+    def addSkip(self, test, reason=None, details=None):
+        super(OpenStackTestResult, self).addSkip(test, reason, details)
+        self._addResult(test, 'SKIP', 'blue', 'S', True)
+
+    def startTest(self, test):
+        self.start_time = self._now()
+        super(OpenStackTestResult, self).startTest(test)
+
+    def writeTestCase(self, cls):
+        if not self.results.get(cls):
+            return
+        if cls != self.last_written:
+            self.colorizer.write(cls, 'white')
+            self.stream.writeln()
+        for result in self.results[cls]:
+            self._writeResult(*result)
+        del self.results[cls]
+        self.stream.flush()
+        self.last_written = cls
+
+    def writeTests(self):
+        time = self.last_time.get(self.last_written, self._now())
+        if not self.last_written or (self._now() - time).total_seconds() > 2.0:
+            diff = 3.0
+            while diff > 2.0:
+                classes = self.results.keys()
+                oldest = min(classes, key=lambda x: self.last_time[x])
+                diff = (self._now() - self.last_time[oldest]).total_seconds()
+                self.writeTestCase(oldest)
+        else:
+            self.writeTestCase(self.last_written)
+
+    def done(self):
+        self.stopTestRun()
+
+    def stopTestRun(self):
+        for cls in list(six.iterkeys(self.results)):
+            self.writeTestCase(cls)
+        self.stream.writeln()
+        self.writeSlowTests()
+
+    def writeSlowTests(self):
+        # Pare out 'fast' tests
+        slow_tests = [item for item in self.slow_tests
+                      if get_elapsed_time_color(item[0]) != 'green']
+        if slow_tests:
+            slow_total_time = sum(item[0] for item in slow_tests)
+            slow = ("Slowest %i tests took %.2f secs:"
+                    % (len(slow_tests), slow_total_time))
+            self.colorizer.write(slow, 'yellow')
+            self.stream.writeln()
+            last_cls = None
+            # sort by name
+            for elapsed, cls, name in sorted(slow_tests,
+                                             key=lambda x: x[1] + x[2]):
+                if cls != last_cls:
+                    self.colorizer.write(cls, 'white')
+                    self.stream.writeln()
+                last_cls = cls
+                self.stream.write('    %s' % str(name).ljust(68))
+                self._writeElapsedTime(elapsed)
+                self.stream.writeln()
+
+    def printErrors(self):
+        if self.showAll:
+            self.stream.writeln()
+        self.printErrorList('ERROR', self.errors)
+        self.printErrorList('FAIL', self.failures)
+
+    def printErrorList(self, flavor, errors):
+        for test, err in errors:
+            self.colorizer.write("=" * 70, 'red')
+            self.stream.writeln()
+            self.colorizer.write(flavor, 'red')
+            self.stream.writeln(": %s" % test.id())
+            self.colorizer.write("-" * 70, 'red')
+            self.stream.writeln()
+            self.stream.writeln("%s" % err)
+
+
+test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)
+
+if sys.version_info[0:2] <= (2, 6):
+    runner = unittest.TextTestRunner(verbosity=2)
+else:
+    runner = unittest.TextTestRunner(verbosity=2,
+                                     resultclass=OpenStackTestResult)
+
+if runner.run(test).wasSuccessful():
+    exit_code = 0
+else:
+    exit_code = 1
+sys.exit(exit_code)
diff --git a/keystone-moon/tools/convert_to_sqlite.sh b/keystone-moon/tools/convert_to_sqlite.sh
new file mode 100755 (executable)
index 0000000..feb3202
--- /dev/null
@@ -0,0 +1,71 @@
+#!/bin/sh
+
+# ================================================================
+#
+# Convert a mysql database dump into something sqlite3 understands.
+#
+# Adapted from
+# http://stackoverflow.com/questions/489277/script-to-convert-mysql-dump-sql-file-into-format-that-can-be-imported-into-sqlit
+#
+# (c) 2010 Martin Czygan <martin.czygan@gmail.com>
+#
+# ================================================================
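+#
+# Example (file names are illustrative):
+#
+#     $ ./convert_to_sqlite.sh keystone.dump
+#     $ sqlite3 keystone.dump.sqlite3.db
+#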
+
+if [ "$#" -lt 1 ]; then
+   echo "Usage: $0 <dumpname>"
+   exit
+fi
+
+SRC=$1
+DST=$1.sqlite3.sql
+DB=$1.sqlite3.db
+ERR=$1.sqlite3.err
+
+cat $SRC |
+grep -v ' KEY "' |
+grep -v ' KEY `' |
+grep -v ' UNIQUE KEY "' |
+grep -v ' UNIQUE KEY `' |
+grep -v ' PRIMARY KEY ' |
+
+sed 's/ENGINE=MyISAM/ /g' |
+sed 's/DEFAULT/ /g' |
+sed 's/CHARSET=[a-zA-Z0-9]*/ /g' |
+sed 's/AUTO_INCREMENT=[0-9]*/ /g' |
+
+sed 's/\\r\\n/\\n/g' |
+sed 's/\\"/"/g' |
+sed '/^SET/d' |
+sed 's/ unsigned / /g' |
+sed 's/ auto_increment/ primary key autoincrement/g' |
+sed 's/ AUTO_INCREMENT/ primary key autoincrement/g' |
+sed 's/ smallint([0-9]*) / integer /g' |
+sed 's/ tinyint([0-9]*) / integer /g' |
+sed 's/ int([0-9]*) / integer /g' |
+sed 's/ character set [^ ]* / /g' |
+sed 's/ enum([^)]*) / varchar(255) /g' |
+sed 's/ on update [^,]*//g' |
+sed 's/UNLOCK TABLES;//g' |
+sed 's/LOCK TABLES [^;]*;//g' |
+perl -e 'local $/;$_=<>;s/,\n\)/\n\)/gs;print "begin;\n";print;print "commit;\n"' |
+perl -pe '
+  if (/^(INSERT.+?)\(/) {
+     $a=$1;
+     s/\\'\''/'\'\''/g;
+     s/\\n/\n/g;
+     s/\),\(/\);\n$a\(/g;
+  }
+  ' > $DST
+
+cat $DST | sqlite3 $DB > $ERR
+
+ERRORS=`cat $ERR | wc -l`
+
+if [ "$ERRORS" -eq "0" ]; then
+       echo "Conversion completed without error. Your db is ready under: $DB"
+       echo "\$ sqlite3 $DB"
+  rm -f $ERR
+else
+   echo "There were errors during conversion. \
+       Please review $ERR and $DST for details."
+fi
diff --git a/keystone-moon/tools/install_venv.py b/keystone-moon/tools/install_venv.py
new file mode 100644 (file)
index 0000000..e01ae3f
--- /dev/null
@@ -0,0 +1,72 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2012 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+virtualenv installation script
+"""
+
+import os
+import sys
+
+import install_venv_common as install_venv
+
+
+def print_help():
+    help = """
+    Keystone development environment setup is complete.
+
+    Keystone development uses virtualenv to track and manage Python
+    dependencies during development and testing.
+
+    To activate the Keystone virtualenv for the duration of your current
+    shell session, you can run:
+
+    $ source .venv/bin/activate
+
+    Or, if you prefer, you can run commands in the virtualenv on a case by case
+    basis by running:
+
+    $ tools/with_venv.sh <your command>
+
+    Also, make test will automatically use the virtualenv.
+    """
+    print(help_text)
+
+
+def main(argv):
+    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    venv = os.path.join(root, '.venv')
+    pip_requires = os.path.join(root, 'requirements.txt')
+    test_requires = os.path.join(root, 'test-requirements.txt')
+    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
+    project = 'Keystone'
+    install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
+                                       py_version, project)
+    options = install.parse_args(argv)
+    install.check_python_version()
+    install.check_dependencies()
+    install.create_virtualenv(no_site_packages=options.no_site_packages)
+    install.install_dependencies()
+    install.run_command([os.path.join(venv, 'bin/python'),
+                        'setup.py', 'develop'])
+    print_help()
+
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/keystone-moon/tools/install_venv_common.py b/keystone-moon/tools/install_venv_common.py
new file mode 100644 (file)
index 0000000..e279159
--- /dev/null
@@ -0,0 +1,172 @@
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Provides methods needed by installation script for OpenStack development
+virtual environments.
+
+Since this script is used to bootstrap a virtualenv from the system's Python
+environment, it should be kept strictly compatible with Python 2.6.
+
+Synced in from openstack-common
+"""
+
+from __future__ import print_function
+
+import optparse
+import os
+import subprocess
+import sys
+
+
+class InstallVenv(object):
+
+    def __init__(self, root, venv, requirements,
+                 test_requirements, py_version,
+                 project):
+        self.root = root
+        self.venv = venv
+        self.requirements = requirements
+        self.test_requirements = test_requirements
+        self.py_version = py_version
+        self.project = project
+
+    def die(self, message, *args):
+        print(message % args, file=sys.stderr)
+        sys.exit(1)
+
+    def check_python_version(self):
+        if sys.version_info < (2, 6):
+            self.die("Need Python Version >= 2.6")
+
+    def run_command_with_code(self, cmd, redirect_output=True,
+                              check_exit_code=True):
+        """Runs a command in an out-of-process shell.
+
+        Returns a tuple of (output, returncode). Working directory is self.root.
+        """
+        if redirect_output:
+            stdout = subprocess.PIPE
+        else:
+            stdout = None
+
+        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
+        output = proc.communicate()[0]
+        if check_exit_code and proc.returncode != 0:
+            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
+        return (output, proc.returncode)
+
+    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
+        return self.run_command_with_code(cmd, redirect_output,
+                                          check_exit_code)[0]
+
+    def get_distro(self):
+        if (os.path.exists('/etc/fedora-release') or
+                os.path.exists('/etc/redhat-release')):
+            return Fedora(
+                self.root, self.venv, self.requirements,
+                self.test_requirements, self.py_version, self.project)
+        else:
+            return Distro(
+                self.root, self.venv, self.requirements,
+                self.test_requirements, self.py_version, self.project)
+
+    def check_dependencies(self):
+        self.get_distro().install_virtualenv()
+
+    def create_virtualenv(self, no_site_packages=True):
+        """Creates the virtual environment and installs PIP.
+
+        Creates the virtual environment and installs PIP only into the
+        virtual environment.
+        """
+        if not os.path.isdir(self.venv):
+            print('Creating venv...', end=' ')
+            if no_site_packages:
+                self.run_command(['virtualenv', '-q', '--no-site-packages',
+                                 self.venv])
+            else:
+                self.run_command(['virtualenv', '-q', self.venv])
+            print('done.')
+        else:
+            print("venv already exists...")
+            pass
+
+    def pip_install(self, *args):
+        self.run_command(['tools/with_venv.sh',
+                         'pip', 'install', '--upgrade'] + list(args),
+                         redirect_output=False)
+
+    def install_dependencies(self):
+        print('Installing dependencies with pip (this can take a while)...')
+
+        # First things first, make sure our venv has the latest pip and
+        # setuptools and pbr
+        self.pip_install('pip>=1.4')
+        self.pip_install('setuptools')
+        self.pip_install('pbr')
+
+        self.pip_install('-r', self.requirements, '-r', self.test_requirements)
+
+    def parse_args(self, argv):
+        """Parses command-line arguments."""
+        parser = optparse.OptionParser()
+        parser.add_option('-n', '--no-site-packages',
+                          action='store_true',
+                          help="Do not inherit packages from global Python "
+                               "install.")
+        return parser.parse_args(argv[1:])[0]
+
+
+class Distro(InstallVenv):
+
+    def check_cmd(self, cmd):
+        return bool(self.run_command(['which', cmd],
+                    check_exit_code=False).strip())
+
+    def install_virtualenv(self):
+        if self.check_cmd('virtualenv'):
+            return
+
+        if self.check_cmd('easy_install'):
+            print('Installing virtualenv via easy_install...', end=' ')
+            if self.run_command(['easy_install', 'virtualenv']):
+                print('Succeeded')
+                return
+            else:
+                print('Failed')
+
+        self.die('ERROR: virtualenv not found.\n\n%s development'
+                 ' requires virtualenv, please install it using your'
+                 ' favorite package management tool' % self.project)
+
+
+class Fedora(Distro):
+    """This covers all Fedora-based distributions.
+
+    Includes: Fedora, RHEL, CentOS, Scientific Linux
+    """
+
+    def check_pkg(self, pkg):
+        return self.run_command_with_code(['rpm', '-q', pkg],
+                                          check_exit_code=False)[1] == 0
+
+    def install_virtualenv(self):
+        if self.check_cmd('virtualenv'):
+            return
+
+        if not self.check_pkg('python-virtualenv'):
+            self.die("Please install 'python-virtualenv'.")
+
+        super(Fedora, self).install_virtualenv()
diff --git a/keystone-moon/tools/pretty_tox.sh b/keystone-moon/tools/pretty_tox.sh
new file mode 100644 (file)
index 0000000..01b67a8
--- /dev/null
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -o pipefail
+
+TESTRARGS=$1
+python setup.py testr --testr-args="--subunit $TESTRARGS" | subunit-trace -f
+retval=$?
+# NOTE(mtreinish) The pipe above eats the slowest-tests display from pbr's
+# testr wrapper, so just manually print the slowest tests.
+echo -e "\nSlowest Tests:\n"
+testr slowest
+exit $retval
diff --git a/keystone-moon/tools/sample_data.sh b/keystone-moon/tools/sample_data.sh
new file mode 100755 (executable)
index 0000000..55ab9d2
--- /dev/null
@@ -0,0 +1,240 @@
+#!/usr/bin/env bash
+
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Sample initial data for Keystone using python-keystoneclient
+#
+# This script is based on the original DevStack keystone_data.sh script.
+#
+# It demonstrates how to bootstrap Keystone with an administrative user
+# using the OS_SERVICE_TOKEN and OS_SERVICE_ENDPOINT environment variables
+# and the administrative API.  It will get the admin_token (OS_SERVICE_TOKEN)
+# and admin_port from keystone.conf if available.
+#
+# Disable creation of endpoints by setting the DISABLE_ENDPOINTS environment variable.
+# Use this with the Catalog Templated backend.
+#
+# An EC2-compatible credential is created for the admin user and
+# placed in etc/ec2rc.
+#
+# Tenant               User      Roles
+# -------------------------------------------------------
+# demo                 admin     admin
+# service              glance    admin
+# service              nova      admin
+# service              ec2       admin
+# service              swift     admin
+
+# By default, passwords used are those in the OpenStack Install and Deploy Manual.
+# One can override these (publicly known, and hence, insecure) passwords by setting the appropriate
+# environment variables. A common default password for all the services can be used by
+# setting the "SERVICE_PASSWORD" environment variable.
+
+ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
+NOVA_PASSWORD=${NOVA_PASSWORD:-${SERVICE_PASSWORD:-nova}}
+GLANCE_PASSWORD=${GLANCE_PASSWORD:-${SERVICE_PASSWORD:-glance}}
+EC2_PASSWORD=${EC2_PASSWORD:-${SERVICE_PASSWORD:-ec2}}
+SWIFT_PASSWORD=${SWIFT_PASSWORD:-${SERVICE_PASSWORD:-swiftpass}}
+
+CONTROLLER_PUBLIC_ADDRESS=${CONTROLLER_PUBLIC_ADDRESS:-localhost}
+CONTROLLER_ADMIN_ADDRESS=${CONTROLLER_ADMIN_ADDRESS:-localhost}
+CONTROLLER_INTERNAL_ADDRESS=${CONTROLLER_INTERNAL_ADDRESS:-localhost}
+
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+KEYSTONE_CONF=${KEYSTONE_CONF:-/etc/keystone/keystone.conf}
+if [[ -r "$KEYSTONE_CONF" ]]; then
+    EC2RC="$(dirname "$KEYSTONE_CONF")/ec2rc"
+elif [[ -r "$TOOLS_DIR/../etc/keystone.conf" ]]; then
+    # assume git checkout
+    KEYSTONE_CONF="$TOOLS_DIR/../etc/keystone.conf"
+    EC2RC="$TOOLS_DIR/../etc/ec2rc"
+else
+    KEYSTONE_CONF=""
+    EC2RC="ec2rc"
+fi
+
+# Extract some info from Keystone's configuration file
+if [[ -r "$KEYSTONE_CONF" ]]; then
+    CONFIG_SERVICE_TOKEN=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^admin_token= | cut -d'=' -f2)
+    if [[ -z "${CONFIG_SERVICE_TOKEN}" ]]; then
+        # default config options are commented out, so let's try those
+        CONFIG_SERVICE_TOKEN=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^\#admin_token= | cut -d'=' -f2)
+    fi
+    CONFIG_ADMIN_PORT=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^admin_port= | cut -d'=' -f2)
+    if [[ -z "${CONFIG_ADMIN_PORT}" ]]; then
+        # default config options are commented out, so let's try those
+        CONFIG_ADMIN_PORT=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^\#admin_port= | cut -d'=' -f2)
+    fi
+fi
+
+export OS_SERVICE_TOKEN=${OS_SERVICE_TOKEN:-$CONFIG_SERVICE_TOKEN}
+if [[ -z "$OS_SERVICE_TOKEN" ]]; then
+    echo "No service token found."
+    echo "Set OS_SERVICE_TOKEN manually from keystone.conf admin_token."
+    exit 1
+fi
+
+export OS_SERVICE_ENDPOINT=${OS_SERVICE_ENDPOINT:-http://$CONTROLLER_PUBLIC_ADDRESS:${CONFIG_ADMIN_PORT:-35357}/v2.0}
+
+function get_id () {
+    echo `"$@" | grep ' id ' | awk '{print $4}'`
+}
+
+#
+# Default tenant
+#
+DEMO_TENANT=$(get_id keystone tenant-create --name=demo \
+                                            --description "Default Tenant")
+
+ADMIN_USER=$(get_id keystone user-create --name=admin \
+                                         --pass="${ADMIN_PASSWORD}")
+
+ADMIN_ROLE=$(get_id keystone role-create --name=admin)
+
+keystone user-role-add --user-id $ADMIN_USER \
+                       --role-id $ADMIN_ROLE \
+                       --tenant-id $DEMO_TENANT
+
+#
+# Service tenant
+#
+SERVICE_TENANT=$(get_id keystone tenant-create --name=service \
+                                               --description "Service Tenant")
+
+GLANCE_USER=$(get_id keystone user-create --name=glance \
+                                          --pass="${GLANCE_PASSWORD}")
+
+keystone user-role-add --user-id $GLANCE_USER \
+                       --role-id $ADMIN_ROLE \
+                       --tenant-id $SERVICE_TENANT
+
+NOVA_USER=$(get_id keystone user-create --name=nova \
+                                        --pass="${NOVA_PASSWORD}" \
+                                        --tenant-id $SERVICE_TENANT)
+
+keystone user-role-add --user-id $NOVA_USER \
+                       --role-id $ADMIN_ROLE \
+                       --tenant-id $SERVICE_TENANT
+
+EC2_USER=$(get_id keystone user-create --name=ec2 \
+                                       --pass="${EC2_PASSWORD}" \
+                                       --tenant-id $SERVICE_TENANT)
+
+keystone user-role-add --user-id $EC2_USER \
+                       --role-id $ADMIN_ROLE \
+                       --tenant-id $SERVICE_TENANT
+
+SWIFT_USER=$(get_id keystone user-create --name=swift \
+                                         --pass="${SWIFT_PASSWORD}" \
+                                         --tenant-id $SERVICE_TENANT)
+
+keystone user-role-add --user-id $SWIFT_USER \
+                       --role-id $ADMIN_ROLE \
+                       --tenant-id $SERVICE_TENANT
+
+#
+# Keystone service
+#
+KEYSTONE_SERVICE=$(get_id \
+keystone service-create --name=keystone \
+                        --type=identity \
+                        --description="Keystone Identity Service")
+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
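+    # Note: \$(public_port)s and \$(admin_port)s (and \$(tenant_id)s in the
+    # service blocks below) are Keystone endpoint-template placeholders that
+    # Keystone substitutes at runtime; the backslash keeps the shell from
+    # treating them as command substitutions.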
+    keystone endpoint-create --region RegionOne --service-id $KEYSTONE_SERVICE \
+        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:\$(public_port)s/v2.0" \
+        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:\$(admin_port)s/v2.0" \
+        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:\$(public_port)s/v2.0"
+fi
+
+#
+# Nova service
+#
+NOVA_SERVICE=$(get_id \
+keystone service-create --name=nova \
+                        --type=compute \
+                        --description="Nova Compute Service")
+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+    keystone endpoint-create --region RegionOne --service-id $NOVA_SERVICE \
+        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8774/v2/\$(tenant_id)s" \
+        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8774/v2/\$(tenant_id)s" \
+        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8774/v2/\$(tenant_id)s"
+fi
+
+#
+# Volume service
+#
+VOLUME_SERVICE=$(get_id \
+keystone service-create --name=volume \
+                        --type=volume \
+                        --description="Nova Volume Service")
+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+    keystone endpoint-create --region RegionOne --service-id $VOLUME_SERVICE \
+        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8776/v1/\$(tenant_id)s" \
+        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8776/v1/\$(tenant_id)s" \
+        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8776/v1/\$(tenant_id)s"
+fi
+
+#
+# Image service
+#
+GLANCE_SERVICE=$(get_id \
+keystone service-create --name=glance \
+                        --type=image \
+                        --description="Glance Image Service")
+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+    keystone endpoint-create --region RegionOne --service-id $GLANCE_SERVICE \
+        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:9292" \
+        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:9292" \
+        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:9292"
+fi
+
+#
+# EC2 service
+#
+EC2_SERVICE=$(get_id \
+keystone service-create --name=ec2 \
+                        --type=ec2 \
+                        --description="EC2 Compatibility Layer")
+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+    keystone endpoint-create --region RegionOne --service-id $EC2_SERVICE \
+        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8773/services/Cloud" \
+        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8773/services/Admin" \
+        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8773/services/Cloud"
+fi
+
+#
+# Swift service
+#
+SWIFT_SERVICE=$(get_id \
+keystone service-create --name=swift \
+                        --type="object-store" \
+                        --description="Swift Service")
+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+    keystone endpoint-create --region RegionOne --service-id $SWIFT_SERVICE \
+        --publicurl   "http://$CONTROLLER_PUBLIC_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \
+        --adminurl    "http://$CONTROLLER_ADMIN_ADDRESS:8080/v1" \
+        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8080/v1/AUTH_\$(tenant_id)s"
+fi
+
+# create ec2 creds and parse the secret and access key returned
+RESULT=$(keystone ec2-credentials-create --tenant-id=$SERVICE_TENANT --user-id=$ADMIN_USER)
+ADMIN_ACCESS=$(echo "$RESULT" | grep access | awk '{print $4}')
+ADMIN_SECRET=$(echo "$RESULT" | grep secret | awk '{print $4}')
+
+# write the secret and access to ec2rc
+cat > "$EC2RC" <<EOF
+ADMIN_ACCESS=$ADMIN_ACCESS
+ADMIN_SECRET=$ADMIN_SECRET
+EOF
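+
+# These credentials can later be reloaded with, for example:
+#   source "$EC2RC"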
diff --git a/keystone-moon/tools/with_venv.sh b/keystone-moon/tools/with_venv.sh
new file mode 100755 (executable)
index 0000000..7303990
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/bash
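+# Runs an arbitrary command inside the project's virtualenv, e.g.
+# (illustrative): tools/with_venv.sh python -c 'import keystone'
+# By default the venv is expected at .venv in the repository root;
+# note that the VENV_DIR default is overridden via VENV_NAME.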
+TOOLS_PATH=${TOOLS_PATH:-$(dirname "$0")}
+VENV_PATH=${VENV_PATH:-${TOOLS_PATH}}
+VENV_DIR=${VENV_NAME:-/../.venv}
+TOOLS=${TOOLS_PATH}
+VENV=${VENV:-${VENV_PATH}/${VENV_DIR}}
+source "${VENV}/bin/activate" && "$@"
diff --git a/keystone-moon/tox.ini b/keystone-moon/tox.ini
new file mode 100644 (file)
index 0000000..366a682
--- /dev/null
@@ -0,0 +1,111 @@
+[tox]
+minversion = 1.6
+skipsdist = True
+envlist = py27,py33,py34,pep8,docs,sample_config
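+# Example invocations (illustrative): `tox` runs every environment above,
+# `tox -e pep8` runs only the style checks, and `tox -e py27 -- <selector>`
+# passes a test selector through to the test runner.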
+
+[testenv]
+usedevelop = True
+install_command = pip install -U {opts} {packages}
+setenv = VIRTUAL_ENV={envdir}
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+commands = bash tools/pretty_tox.sh '{posargs}'
+whitelist_externals = bash
+
+[testenv:py33]
+deps = -r{toxinidir}/requirements-py3.txt
+       -r{toxinidir}/test-requirements-py3.txt
+       nose
+commands =
+  nosetests --with-coverage --cover-package=keystone \
+      --exclude test_ldap \
+      keystone/tests/test_auth_plugin.py \
+      keystone/tests/test_backend.py \
+      keystone/tests/test_backend_rules.py \
+      keystone/tests/test_cache_backend_mongo.py \
+      keystone/tests/test_driver_hints.py \
+      keystone/tests/test_hacking_checks.py \
+      keystone/tests/test_injection.py \
+      keystone/tests/test_matchers.py \
+      keystone/tests/test_policy.py \
+      keystone/tests/test_singular_plural.py \
+      keystone/tests/test_sizelimit.py \
+      keystone/tests/test_sql_migrate_extensions.py \
+      keystone/tests/test_token_bind.py \
+      keystone/tests/test_url_middleware.py \
+      keystone/tests/unit/common/test_utils.py \
+      keystone/tests/test_validation.py \
+      keystone/tests/test_v3_controller.py \
+      keystone/tests/test_wsgi.py \
+      keystone/tests/unit
+
+[testenv:py34]
+deps = -r{toxinidir}/requirements-py3.txt
+       -r{toxinidir}/test-requirements-py3.txt
+       nose
+commands =
+  nosetests --with-coverage --cover-package=keystone \
+      --exclude test_ldap \
+      keystone/tests/test_auth_plugin.py \
+      keystone/tests/test_backend.py \
+      keystone/tests/test_backend_rules.py \
+      keystone/tests/test_cache_backend_mongo.py \
+      keystone/tests/test_driver_hints.py \
+      keystone/tests/test_hacking_checks.py \
+      keystone/tests/test_injection.py \
+      keystone/tests/test_matchers.py \
+      keystone/tests/test_policy.py \
+      keystone/tests/test_singular_plural.py \
+      keystone/tests/test_sizelimit.py \
+      keystone/tests/test_sql_migrate_extensions.py \
+      keystone/tests/test_token_bind.py \
+      keystone/tests/test_url_middleware.py \
+      keystone/tests/unit/common/test_utils.py \
+      keystone/tests/test_validation.py \
+      keystone/tests/test_v3_controller.py \
+      keystone/tests/test_wsgi.py \
+      keystone/tests/unit
+
+[testenv:pep8]
+commands =
+  flake8 {posargs}
+  # Run bashate during pep8 runs to ensure bash style violations are caught
+  # by the check and gate queues
+  bashate run_tests.sh examples/pki/gen_pki.sh
+  # Check that .po and .pot files are valid.
+  bash -c "find keystone -type f -regex '.*\.pot?' -print0| \
+           xargs -0 -n 1 msgfmt --check-format -o /dev/null"
+
+[tox:jenkins]
+downloadcache = ~/cache/pip
+
+[testenv:cover]
+commands = python setup.py testr --coverage --testr-args='{posargs}'
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:debug]
+commands = oslo_debug_helper {posargs}
+
+[flake8]
+filename= *.py,keystone-all,keystone-manage
+show-source = true
+
+# H405  multi line docstring summary not separated with an empty line
+ignore = H405
+
+exclude=.venv,.git,.tox,build,dist,doc,*openstack/common*,*lib/python*,*egg,tools,vendor,.update-venv,*.ini,*.po,*.pot
+max-complexity=24
+
+[testenv:docs]
+commands=
+    python setup.py build_sphinx
+
+[testenv:sample_config]
+commands = oslo-config-generator --config-file=config-generator/keystone.conf
+
+[hacking]
+import_exceptions =
+  keystone.i18n
+local-check-factory = keystone.hacking.checks.factory